diff --git a/.dockerignore b/.dockerignore
index ee2e8af78dd3..c32286be6a01 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -39,7 +39,7 @@ contracts/.git
 !etc/multivm_bootloaders
 !cargo
 !bellman-cuda
-!prover/vk_setup_data_generator_server_fri/data/
+!prover/crates/bin/vk_setup_data_generator_server_fri/data/
 !.github/release-please/manifest.json
 !etc/env/file_based
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 764b85baccaf..dba6efd2fdff 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -18,4 +18,3 @@
 - [ ] Tests for the changes have been added / updated.
 - [ ] Documentation comments have been added / updated.
 - [ ] Code has been formatted via `zk fmt` and `zk lint`.
-- [ ] Spellcheck has been run via `zk spellcheck`.
diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json
index 8dfb41d5827a..a26bd9fb6b09 100644
--- a/.github/release-please/manifest.json
+++ b/.github/release-please/manifest.json
@@ -1,4 +1,4 @@
 {
-  "core": "24.7.0",
-  "prover": "15.0.0"
+  "core": "24.11.0",
+  "prover": "16.1.0"
 }
diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml
index 3068b3414774..fff4474cdef4 100644
--- a/.github/workflows/build-contract-verifier-template.yml
+++ b/.github/workflows/build-contract-verifier-template.yml
@@ -49,6 +49,7 @@ jobs:
           echo $(pwd)/bin >> $GITHUB_PATH
           echo CI=1 >> .env
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Download contracts
         run: |
@@ -138,7 +139,7 @@ jobs:
           COMPONENT: ${{ matrix.components }}
           PLATFORM: ${{ matrix.platforms }}
         run: |
-          ci_run rustup default nightly-2024-05-07
+          ci_run run_retried rustup default nightly-2024-05-07
           platform=$(echo $PLATFORM | tr '/' '-')
           ci_run zk docker $DOCKER_ACTION --custom-tag=${IMAGE_TAG_SUFFIX} --platform=${PLATFORM} $COMPONENT
       - name: Show sccache stats
diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml
index 49b619a7f944..5f8418dee9fe 100644
--- a/.github/workflows/build-core-template.yml
+++ b/.github/workflows/build-core-template.yml
@@ -58,6 +58,7 @@ jobs:
           echo $(pwd)/bin >> $GITHUB_PATH
           echo CI=1 >> .env
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Download contracts
         run: |
@@ -147,7 +148,7 @@ jobs:
           COMPONENT: ${{ matrix.components }}
           PLATFORM: ${{ matrix.platforms }}
         run: |
-          ci_run rustup default nightly-2024-05-07
+          ci_run run_retried rustup default nightly-2024-05-07
           platform=$(echo $PLATFORM | tr '/' '-')
           ci_run zk docker $DOCKER_ACTION --custom-tag=${IMAGE_TAG_SUFFIX} --platform=${PLATFORM} $COMPONENT
       - name: Show sccache stats
diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml
index 138e93810934..7e5257796643 100644
--- a/.github/workflows/build-docker-from-tag.yml
+++ b/.github/workflows/build-docker-from-tag.yml
@@ -59,9 +59,21 @@ jobs:
       image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}
       en_alpha_release: true

+  build-push-tee-prover-images:
+    name: Build and push images
+    needs: [setup]
+    uses: ./.github/workflows/build-tee-prover-template.yml
+    if: contains(github.ref_name, 'core')
+    secrets:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
+      ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
+    with:
+      image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}
+
   build-push-contract-verifier:
     name: Build and push image
-    needs: [ setup ]
+    needs: [setup]
     uses: ./.github/workflows/build-contract-verifier-template.yml
     if: contains(github.ref_name, 'contract_verifier')
     secrets:
diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml
index e5e8fb69fb1d..bd3ecd12e627 100644
--- a/.github/workflows/build-local-node-docker.yml
+++ b/.github/workflows/build-local-node-docker.yml
@@ -29,6 +29,7 @@ jobs:
           echo $(pwd)/bin >> $GITHUB_PATH
           echo CI=1 >> .env
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       # TODO: Remove after when we can upgrade hardhat-plugins
       - name: pre-download compiilers
diff --git a/.github/workflows/build-prover-fri-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar.yml
index 4a83af559e50..11f151c8a136 100644
--- a/.github/workflows/build-prover-fri-gpu-gar.yml
+++ b/.github/workflows/build-prover-fri-gpu-gar.yml
@@ -29,6 +29,10 @@ jobs:
         run: |
           gsutil -m rsync -r gs://matterlabs-setup-data-us/${{ inputs.setup_keys_id }} docker/prover-gpu-fri-gar

+      - name: Auth Docker hub registry
+        run: |
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
+
       - name: Login to us-central1 GAR
         run: |
           gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev
@@ -44,10 +48,9 @@ jobs:
         with:
           context: docker/prover-gpu-fri-gar
           build-args: |
-            PROVER_IMAGE=${{ inputs.image_tag_suffix }}
+            PROVER_IMAGE=${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }}
           push: true
           tags: |
-            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.image_tag_suffix }}
             us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }}

       - name: Login to Asia GAR
@@ -56,9 +59,6 @@ jobs:

       - name: Build and push to Asia GAR
         run: |
-          docker buildx imagetools create \
-            --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.image_tag_suffix }} \
-            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.image_tag_suffix }}
           docker buildx imagetools create \
             --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \
             us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }}
@@ -69,9 +69,6 @@ jobs:

       - name: Build and push to Europe GAR
         run: |
-          docker buildx imagetools create \
-            --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.image_tag_suffix }} \
-            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.image_tag_suffix }}
           docker buildx imagetools create \
             --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \
             us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }}
diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml
index d03ae124b176..cce0fb1ecbe9 100644
--- a/.github/workflows/build-prover-template.yml
+++ b/.github/workflows/build-prover-template.yml
@@ -70,6 +70,7 @@ jobs:
           echo $(pwd)/bin >> $GITHUB_PATH
           echo CI=1 >> .env
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: start-services
         run: |
@@ -185,13 +186,13 @@ jobs:
         component:
           - witness-vector-generator
     steps:
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3

+      - name: Auth Docker hub registry
+        run: |
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
+
       - name: Login to us-central1 GAR
         run: |
           gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev
@@ -199,9 +200,6 @@ jobs:
       - name: Login and push to Asia GAR
         run: |
           gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev
-          docker buildx imagetools create \
-            --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ inputs.image_tag_suffix }} \
-            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ inputs.image_tag_suffix }}
           docker buildx imagetools create \
             --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \
             us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }}
@@ -209,9 +207,6 @@ jobs:
       - name: Login and push to Europe GAR
         run: |
           gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev
-          docker buildx imagetools create \
-            --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ inputs.image_tag_suffix }} \
-            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ inputs.image_tag_suffix }}
           docker buildx imagetools create \
             --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \
             us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }}
diff --git a/.github/workflows/build-tee-prover-template.yml b/.github/workflows/build-tee-prover-template.yml
new file mode 100644
index 000000000000..e05f368aa8b9
--- /dev/null
+++ b/.github/workflows/build-tee-prover-template.yml
@@ -0,0 +1,79 @@
+name: Build TEE Prover images
+on:
+  workflow_call:
+    secrets:
+      DOCKERHUB_USER:
+        description: "DOCKERHUB_USER"
+        required: true
+      DOCKERHUB_TOKEN:
+        description: "DOCKERHUB_TOKEN"
+        required: true
+      ATTIC_TOKEN:
+        description: "ATTIC_TOKEN"
+        required: false
+    inputs:
+      image_tag_suffix:
+        description: "Optional suffix to override tag name generation"
+        type: string
+        required: false
+      action:
+        description: "Action with docker image"
+        type: string
+        default: "push"
+        required: false
+jobs:
+  build-images:
+    name: Build and Push Docker Images
+    env:
+      IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}
+    runs-on: [matterlabs-ci-runner]
+    steps:
+      - uses: actions/checkout@v4
+        if: ${{ github.event_name == 'workflow_dispatch' }}
+        with:
+          ref: ${{ github.event.inputs.target_branch }}
+
+      - uses: actions/checkout@v4
+        if: ${{ github.event_name != 'workflow_dispatch' }}
+
+      - uses: cachix/install-nix-action@v27
+        with:
+          extra_nix_config: |
+            access-tokens = github.com=${{ github.token }}
+            trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= tee-pot:SS6HcrpG87S1M6HZGPsfo7d1xJccCGev7/tXc5+I4jg=
+            substituters = https://cache.nixos.org/ https://attic.teepot.org/tee-pot
+            sandbox = true
+
+      - name: Setup Attic cache
+        uses: ryanccn/attic-action@v0
+        with:
+          endpoint: https://attic.teepot.org/
+          cache: tee-pot
+          token: ${{ secrets.ATTIC_TOKEN }}
+
+      - name: Build Docker images
+        id: build
+        run: |
+          nix build -L .#container-tee-prover-azure
+          export IMAGE_TAG=$(docker load -i result | grep -Po 'Loaded image.*: \K.*')
+          echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT"
+          echo "IMAGE_NAME=${IMAGE_TAG%:*}" >> "$GITHUB_OUTPUT"
+
+      - name: Login to Docker registries
+        if: ${{ inputs.action == 'push' }}
+        run: |
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
+          gcloud auth configure-docker us-docker.pkg.dev -q
+
+      - name: Push Docker images
+        if: ${{ inputs.action == 'push' }}
+        run: |
+          export IMAGE_TAG="${{ steps.build.outputs.IMAGE_TAG }}"
+          export IMAGE_NAME="${{ steps.build.outputs.IMAGE_NAME }}"
+          for repo in matterlabsrobot us-docker.pkg.dev/matterlabs-infra/matterlabs-docker; do
+            for tag in "${IMAGE_TAG}" "${IMAGE_NAME}:latest" "${IMAGE_NAME}:${IMAGE_TAG_SUFFIX}"; do
+              docker tag "${IMAGE_TAG}" "${repo}/${tag}"
+              docker push "${repo}/${tag}"
+            done
+          done
diff --git a/.github/workflows/check-spelling.yml b/.github/workflows/check-spelling.yml
deleted file mode 100644
index 8ffa29c1ea9b..000000000000
--- a/.github/workflows/check-spelling.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: Check Spelling
-
-on:
-  push:
-    branches:
-      - main
-  pull_request:
-  merge_group:
-
-env:
-  CARGO_TERM_COLOR: always
-
-jobs:
-  spellcheck:
-    runs-on: [matterlabs-ci-runner]
-    steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
-        with:
-          submodules: "recursive"
-      - name: Use Node.js
-        uses: actions/setup-node@v3
-        with:
-          node-version: 18
-
-      - name: Setup environment
-        run: |
-          echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
-          echo $(pwd)/bin >> $GITHUB_PATH
-          echo IN_DOCKER=1 >> .env
-
-      - name: Start services
-        run: |
-          run_retried docker compose pull zk
-          docker compose up -d zk
-
-      - name: Build zk
-        run: |
-          ci_run zk
-
-      - name: Run spellcheck
-        run: |
-          ci_run zk spellcheck
diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml
index 191c69180631..657fbd682b7c 100644
--- a/.github/workflows/ci-common-reusable.yml
+++ b/.github/workflows/ci-common-reusable.yml
@@ -1,6 +1,13 @@ name: Workflow template for CI jobs to be ran on both Prover and Core Components
 on:
   workflow_call:
+    secrets:
+      DOCKERHUB_USER:
+        description: "DOCKERHUB_USER"
+        required: true
+      DOCKERHUB_TOKEN:
+        description: "DOCKERHUB_TOKEN"
+        required: true

 jobs:
   build:
@@ -18,6 +25,7 @@ jobs:
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Start services
         run: |
diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml
index 4b67a8ab5cd2..ac7aefc7bfd9 100644
--- a/.github/workflows/ci-core-lint-reusable.yml
+++ b/.github/workflows/ci-core-lint-reusable.yml
@@ -1,7 +1,13 @@ name: Workflow template for Core Linting CI jobs
 on:
   workflow_call:
-
+    secrets:
+      DOCKERHUB_USER:
+        description: "DOCKERHUB_USER"
+        required: true
+      DOCKERHUB_TOKEN:
+        description: "DOCKERHUB_TOKEN"
+        required: true

 jobs:
   code_lint:
@@ -17,6 +23,7 @@ jobs:
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Start services
         run: |
@@ -35,6 +42,7 @@ jobs:
         run: |
           ci_run zk fmt --check
           ci_run zk lint rust --check
+          ci_run zk lint toolbox --check
           ci_run zk lint js --check
           ci_run zk lint ts --check
           ci_run zk lint md --check
diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml
index b15bc0c41997..c0c816aa8e28 100644
--- a/.github/workflows/ci-core-reusable.yml
+++ b/.github/workflows/ci-core-reusable.yml
@@ -1,6 +1,13 @@ name: Workflow template for CI jobs for Core Components
 on:
   workflow_call:
+    secrets:
+      DOCKERHUB_USER:
+        description: "DOCKERHUB_USER"
+        required: true
+      DOCKERHUB_TOKEN:
+        description: "DOCKERHUB_TOKEN"
+        required: true
     inputs:
       compilers:
         description: "JSON of required compilers and their versions"
@@ -12,6 +19,9 @@ jobs:
   lint:
     name: lint
     uses: ./.github/workflows/ci-core-lint-reusable.yml
+    secrets:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}

   unit-tests:
     runs-on: [matterlabs-ci-runner]
@@ -26,6 +36,7 @@ jobs:
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       # TODO: Remove when we after upgrade of hardhat-plugins
       - name: pre-download compilers
@@ -79,6 +90,7 @@ jobs:
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Loadtest configuration
         run: |
@@ -135,7 +147,7 @@ jobs:
         base_token: ["Eth", "Custom"]
         deployment_mode: ["Rollup", "Validium"]
     env:
-      SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads${{ matrix.consensus && ',consensus' || '' }}"
+      SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}"

     runs-on: [matterlabs-ci-runner]
     steps:
@@ -151,6 +163,7 @@ jobs:
           echo IN_DOCKER=1 >> .env
           echo RUN_CONTRACT_VERIFICATION_TEST=true >> .env
           echo ZKSYNC_DEBUG_LOGS=true >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Download zksolc/solc and zkvyper/vyper
         run: |
@@ -205,7 +218,7 @@ jobs:
       # `sleep 5` because we need to wait until server started properly
       - name: Run server
         run: |
-          ci_run zk server --use-node-framework --components=$SERVER_COMPONENTS &>server.log &
+          ci_run zk server --components=$SERVER_COMPONENTS &>server.log &
           ci_run sleep 5

       - name: Run contract verifier
@@ -230,9 +243,10 @@ jobs:
           fi

           ENABLE_CONSENSUS=${{ matrix.consensus }} \
           DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \
+          SNAPSHOTS_CREATOR_VERSION=${{ matrix.deployment_mode == 'Validium' && '0' || '1' }} \
           DISABLE_TREE_DURING_PRUNING=${{ matrix.base_token == 'Eth' }} \
           ETH_CLIENT_WEB3_URL="http://reth:8545" \
-          PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE,DISABLE_TREE_DURING_PRUNING,ETH_CLIENT_WEB3_URL" \
+          PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE,DISABLE_TREE_DURING_PRUNING,SNAPSHOTS_CREATOR_VERSION,ETH_CLIENT_WEB3_URL" \
           ci_run yarn recovery-test snapshot-recovery-test

       - name: Genesis recovery test
@@ -287,6 +301,10 @@ jobs:
         if: always()
         run: ci_run cat core/tests/upgrade-test/upgrade.log || true

+      - name: Show fee-projection.log logs
+        if: always()
+        run: ci_run cat core/tests/ts-integration/fees.log || true
+
       - name: Show sccache logs
         if: always()
         run: |
@@ -304,7 +322,7 @@ jobs:

     runs-on: [matterlabs-ci-runner]
     env:
-      SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads${{ matrix.consensus && ',consensus' || '' }}"
+      SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}"
       EXT_NODE_FLAGS: "${{ matrix.consensus && '-- --enable-consensus' || '' }}"

     steps:
@@ -321,6 +339,7 @@ jobs:
           echo IN_DOCKER=1 >> .env
           echo RUN_CONTRACT_VERIFICATION_TEST=true >> .env
           echo ZKSYNC_DEBUG_LOGS=true >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Start services
         run: |
diff --git a/.github/workflows/ci-docs-reusable.yml b/.github/workflows/ci-docs-reusable.yml
index 82ef312c9832..446b7b32d3e7 100644
--- a/.github/workflows/ci-docs-reusable.yml
+++ b/.github/workflows/ci-docs-reusable.yml
@@ -1,7 +1,13 @@ name: Workflow template for CI jobs against docs
 on:
   workflow_call:
-
+    secrets:
+      DOCKERHUB_USER:
+        description: "DOCKERHUB_USER"
+        required: true
+      DOCKERHUB_TOKEN:
+        description: "DOCKERHUB_TOKEN"
+        required: true

 jobs:
   lint:
     runs-on: [matterlabs-ci-runner]
@@ -16,6 +22,7 @@ jobs:
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Start services
         run: |
diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml
index 6a8813a0a343..993203eb854e 100644
--- a/.github/workflows/ci-prover-reusable.yml
+++ b/.github/workflows/ci-prover-reusable.yml
@@ -1,7 +1,13 @@ name: Workflow template for CI jobs for Prover Components
 on:
   workflow_call:
-
+    secrets:
+      DOCKERHUB_USER:
+        description: "DOCKERHUB_USER"
+        required: true
+      DOCKERHUB_TOKEN:
+        description: "DOCKERHUB_TOKEN"
+        required: true

 jobs:
   lint:
     runs-on: [matterlabs-ci-runner]
@@ -18,6 +24,7 @@ jobs:
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Start services
         run: |
@@ -49,6 +56,7 @@ jobs:
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Start services
         run: |
diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml
index f3238566eeec..8c8434c6711a 100644
--- a/.github/workflows/ci-zk-toolbox-reusable.yml
+++ b/.github/workflows/ci-zk-toolbox-reusable.yml
@@ -1,6 +1,13 @@ name: Workflow template for CI jobs for Core Components
 on:
   workflow_call:
+    secrets:
+      DOCKERHUB_USER:
+        description: "DOCKERHUB_USER"
+        required: true
+      DOCKERHUB_TOKEN:
+        description: "DOCKERHUB_TOKEN"
+        required: true

 env:
   CLICOLOR: 1
@@ -9,6 +16,9 @@ jobs:
   lint:
     name: lint
     uses: ./.github/workflows/ci-core-lint-reusable.yml
+    secrets:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}

   build:
     runs-on: [matterlabs-ci-runner]
@@ -24,6 +34,7 @@ jobs:
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Start services
         run: |
@@ -47,7 +58,7 @@ jobs:
           path: zk_toolbox.tar
           compression-level: 0

-  integration_test:
+  tests:
     runs-on: [matterlabs-ci-runner]
     needs: [build]
@@ -72,6 +83,7 @@ jobs:
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Start services
         run: |
@@ -80,6 +92,10 @@ jobs:

       - name: Initialize ecosystem
         run: |
+          ci_run git config --global --add safe.directory /usr/src/zksync
+          ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts
+          ci_run git config --global --add safe.directory /usr/src/zksync/contracts
+
           ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \
           --deploy-ecosystem --l1-rpc-url=http://reth:8545 \
           --server-db-url=postgres://postgres:notsecurepassword@postgres:5432 \
@@ -90,13 +106,41 @@ jobs:

       - name: Run server
         run: |
-          ci_run zk_inception server --ignore-prerequisites -a --use-node-framework --verbose &>server.log &
+          ci_run zk_inception server --ignore-prerequisites &>server.log &
           ci_run sleep 5

       - name: Run integration tests
         run: |
-          ci_run zk_supervisor integration-tests --ignore-prerequisites --verbose
+          ci_run zk_supervisor test integration --ignore-prerequisites --verbose
+
+      - name: Run external node server
+        run: |
+          ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@postgres:5432 \
+          --db-name=zksync_en_localhost_era --l1-rpc-url=http://reth:8545
+          ci_run zk_inception external-node init --ignore-prerequisites
+          ci_run zk_inception external-node run --ignore-prerequisites &>external_node.log &
+          ci_run sleep 5
+
+      - name: Run integration tests en
+        run: |
+          ci_run zk_supervisor test integration --ignore-prerequisites --verbose --external-node
+
+      - name: Run revert tests
+        run: |
+          ci_run zk_supervisor test revert --ignore-prerequisites --verbose
+
+      - name: Run revert tests (external node)
+        run: |
+          ci_run zk_supervisor test revert --external-node --ignore-prerequisites --verbose

       - name: Show server.log logs
         if: always()
         run: ci_run cat server.log || true
+
+      - name: Show external_node.log logs
+        if: always()
+        run: ci_run cat external_node.log || true
+
+      - name: Show revert.log logs
+        if: always()
+        run: ci_run cat ./core/tests/revert-test/revert.log || true
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 9e4d093e317a..f8ef751b317f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -83,30 +83,45 @@ jobs:
     needs: changed_files
     if: contains(github.ref_name, 'release-please--branches')
     uses: ./.github/workflows/ci-core-lint-reusable.yml
+    secrets:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}

   ci-for-core:
     name: CI for Core Components
     needs: changed_files
     if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }}
     uses: ./.github/workflows/ci-core-reusable.yml
+    secrets:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}

   ci-for-prover:
     needs: changed_files
     if: ${{ (needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }}
     name: CI for Prover Components
     uses: ./.github/workflows/ci-prover-reusable.yml
+    secrets:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}

   ci-for-zk-toolbox:
     needs: changed_files
     if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.zk_toolbox == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }}
     name: CI for zk_toolbox
     uses: ./.github/workflows/ci-zk-toolbox-reusable.yml
+    secrets:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}

   ci-for-docs:
     needs: changed_files
     if: needs.changed_files.outputs.docs == 'true'
     name: CI for Docs
     uses: ./.github/workflows/ci-docs-reusable.yml
+    secrets:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}

   # What needs to be ran for both core and prover
   ci-for-common:
@@ -114,6 +129,9 @@ jobs:
     if: ${{ (needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }}
     name: CI for Common Components (prover or core)
     uses: ./.github/workflows/ci-common-reusable.yml
+    secrets:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}

   build-core-images:
     name: Build core images
@@ -127,6 +145,19 @@ jobs:
       DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
       DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}

+  build-tee-prover-images:
+    name: Build TEE Prover images
+    needs: changed_files
+    if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }}
+    uses: ./.github/workflows/build-tee-prover-template.yml
+    with:
+      image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}
+      action: "build"
+    secrets:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
+      ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
+
   build-contract-verifier:
     name: Build contract verifier
     needs: changed_files
diff --git a/.github/workflows/release-please-cargo-lock.yml b/.github/workflows/release-please-cargo-lock.yml
index a602eaaf083a..c7972580cacb 100644
--- a/.github/workflows/release-please-cargo-lock.yml
+++ b/.github/workflows/release-please-cargo-lock.yml
@@ -32,7 +32,8 @@ jobs:
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
-
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
+
       - name: Start services
         if: steps.condition.outputs.skip_steps != 'true'
         run: docker compose up -d zk
diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml
index dc56aa977617..9605568ead5d 100644
--- a/.github/workflows/release-test-stage.yml
+++ b/.github/workflows/release-test-stage.yml
@@ -71,9 +71,21 @@ jobs:
       DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
       DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}

+  build-push-tee-prover-images:
+    name: Build and push images
+    needs: [setup, changed_files]
+    uses: ./.github/workflows/build-tee-prover-template.yml
+    if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true'
+    with:
+      image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}
+    secrets:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
+      ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
+
   build-push-contract-verifier:
     name: Build and push images
-    needs: [ setup, changed_files ]
+    needs: [setup, changed_files]
     uses: ./.github/workflows/build-contract-verifier-template.yml
     if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true'
     with:
diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml
index 53dada123574..e62f8fd0a284 100644
--- a/.github/workflows/vm-perf-comparison.yml
+++ b/.github/workflows/vm-perf-comparison.yml
@@ -35,6 +35,7 @@ jobs:
           touch .env
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}

       - name: init
         run: |
diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml
index fce7ead2d696..40da982155c1 100644
--- a/.github/workflows/vm-perf-to-prometheus.yml
+++ b/.github/workflows/vm-perf-to-prometheus.yml
@@ -28,6 +28,7 @@ jobs:

       - name: init
         run: |
+          docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
           run_retried docker compose pull zk
           docker compose up -d zk
           ci_run zk
diff --git a/.gitignore b/.gitignore
index 32ed5815b017..3ffddc7a7930 100644
--- a/.gitignore
+++ b/.gitignore
@@ -108,7 +108,7 @@ hyperchain-*.yml
 /etc/hyperchains/artifacts

 # Prover keys that should not be commited
-prover/vk_setup_data_generator_server_fri/data/setup_*
+prover/crates/bin/vk_setup_data_generator_server_fri/data/setup_*

 # Zk Toolbox
 chains/era/configs/*
diff --git a/CODEOWNERS b/CODEOWNERS
index eea7f1fa1373..63094b333057 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1,4 +1,4 @@
-.github/release-please/** @RomanBrodetski @perekopskiy @Deniallugo @popzxc
-**/CHANGELOG.md @RomanBrodetski @perekopskiy @Deniallugo @popzxc
+.github/release-please/** @RomanBrodetski @perekopskiy @Deniallugo @popzxc @EmilLuta
+**/CHANGELOG.md @RomanBrodetski @perekopskiy @Deniallugo @popzxc @EmilLuta
 CODEOWNERS @RomanBrodetski @perekopskiy @Deniallugo @popzxc
 .github/workflows/** @matter-labs/devops
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2676289d0f3a..0791a311fed3 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -52,8 +52,6 @@ Be polite and respectful.

 **Q**: I have a small contribution that's not getting traction/being merged?
 **A**: Due to capacity, contributions that are simple renames of variables or stylistic/minor text improvements, one-off
-typo fix will not be merged. If you do find any typos or grammar errors, the preferred avenue is to improve the existing
-spellchecker. Given you have no technical prowess to do so, please create an issue. Please note that issues will be
-resolved on a best effort basis.
+typo fix will not be merged.

 ### Thank you
diff --git a/Cargo.lock b/Cargo.lock
index f1d34607ead4..a251c2cabb2b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -96,21 +96,6 @@ dependencies = [
  "memchr",
 ]

-[[package]]
-name = "alloc-no-stdlib"
-version = "2.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3"
-
-[[package]]
-name = "alloc-stdlib"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece"
-dependencies = [
- "alloc-no-stdlib",
-]
-
 [[package]]
 name = "allocator-api2"
 version = "0.2.16"
@@ -255,33 +240,6 @@ version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"

-[[package]]
-name = "async-compression"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f658e2baef915ba0f26f1f7c42bfb8e12f532a01f449a090ded75ae7a07e9ba2"
-dependencies = [
- "brotli",
- "flate2",
- "futures-core",
- "memchr",
- "pin-project-lite",
- "tokio",
- "zstd",
- "zstd-safe",
-]
-
-[[package]]
-name = "async-lock"
-version = "3.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c"
-dependencies = [
- "event-listener 4.0.0",
- "event-listener-strategy",
- "pin-project-lite",
-]
-
 [[package]]
 name = "async-stream"
 version = "0.3.5"
@@ -324,6 +282,12 @@ dependencies = [
  "num-traits",
 ]

+[[package]]
+name = "atomic-waker"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
+
 [[package]]
 name = "atomic-write-file"
 version = "0.1.2"
@@ -351,6 +315,33 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"

+[[package]]
+name = "aws-lc-rs"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8a47f2fb521b70c11ce7369a6c5fa4bd6af7e5d62ec06303875bafe7c6ba245"
+dependencies = [
+ "aws-lc-sys",
+ "mirai-annotations",
+ "paste",
+ "zeroize",
+]
+
+[[package]]
+name = "aws-lc-sys"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2927c7af777b460b7ccd95f8b67acd7b4c04ec8896bf0c8e80ba30523cffc057"
+dependencies = [
+ "bindgen 0.69.4",
+ "cc",
+ "cmake",
+ "dunce",
+ "fs_extra",
+ "libc",
+ "paste",
+]
+
 [[package]]
 name = "axum"
 version = "0.6.20"
@@ -358,13 +349,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
 dependencies = [
  "async-trait",
- "axum-core",
+ "axum-core 0.3.4",
  "bitflags 1.3.2",
  "bytes",
  "futures-util",
- "http",
- "http-body",
- "hyper",
+ "http 0.2.9",
+ "http-body 0.4.6",
+ "hyper 0.14.29",
+ "itoa",
+ "matchit",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustversion",
+ "serde",
+ "sync_wrapper 0.1.2",
+ "tower",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf"
+dependencies = [
+ "async-trait",
+ "axum-core 0.4.3",
+ "bytes",
+ "futures-util",
+ "http 1.1.0",
+ "http-body 1.0.0",
+ "http-body-util",
+ "hyper 1.3.1",
+ "hyper-util",
  "itoa",
  "matchit",
  "memchr",
  "mime",
  "percent-encoding",
  "pin-project-lite",
@@ -376,11 +396,12 @@ dependencies = [
  "serde_json",
  "serde_path_to_error",
  "serde_urlencoded",
- "sync_wrapper",
+ "sync_wrapper 1.0.1",
  "tokio",
  "tower",
  "tower-layer",
  "tower-service",
+ "tracing",
 ]

 [[package]]
@@ -392,12 +413,33 @@ dependencies = [
  "async-trait",
  "bytes",
  "futures-util",
- "http",
- "http-body",
+ "http 0.2.9",
+ "http-body 0.4.6",
+ "mime",
+ "rustversion",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "futures-util",
+ "http 1.1.0",
+ "http-body 1.0.0",
+ "http-body-util",
  "mime",
+ "pin-project-lite",
  "rustversion",
+ "sync_wrapper 0.1.2",
  "tower-layer",
  "tower-service",
+ "tracing",
 ]

 [[package]]
@@ -451,12 +493,27 @@ version = "0.21.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9"

+[[package]]
+name = "base64"
+version = "0.22.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
+
 [[package]]
 name = "base64ct"
 version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"

+[[package]]
+name = "basic-toml"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7bfc506e7a2370ec239e1d072507b2a80c833083699d3c6fa176fbb4de8448c6"
+dependencies = [
+ "serde",
+]
+
 [[package]]
 name = "beef"
 version = "0.5.2"
 dependencies = [

 [[package]]
 name = "bellman_ce"
-version = "0.3.2"
-source = "git+https://github.com/matter-labs/bellman?branch=dev#5520aa2274afe73d281373c92b007a2ecdebfbea"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ea340d5c1394ee4daf4415dd80e06f74e0ad9b08e21f73f6bb1fa3a9dfae80d"
 dependencies = [
  "arrayvec 0.7.4",
  "bit-vec",
  "blake2s_const",
  "blake2s_simd",
  "byteorder",
  "cfg-if 1.0.0",
- "crossbeam",
+ "crossbeam 0.7.3",
  "futures 0.3.28",
  "hex",
  "lazy_static",
  "num_cpus",
- "pairing_ce 0.28.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pairing_ce",
  "rand 0.4.6",
  "serde",
  "smallvec",
@@ -495,7 +553,7 @@ version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa"
 dependencies = [
- "num-bigint 0.4.4",
+ "num-bigint 0.4.6",
  "num-integer",
  "num-traits",
 ]
@@ -530,6 +588,29 @@ dependencies = [
  "syn 2.0.38",
 ]

+[[package]]
+name = "bindgen"
+version = "0.69.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0"
+dependencies = [
+ "bitflags 2.6.0",
+ "cexpr",
+ "clang-sys",
+ "itertools 0.12.0",
+ "lazy_static",
+ "lazycell",
+ "log",
+ "prettyplease",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "regex",
+ "rustc-hash",
+ "shlex",
+ "syn 2.0.38",
+ "which",
+]
+
 [[package]]
 name = "bit-vec"
 version = "0.6.3"
@@ -547,9 +628,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

 [[package]]
 name = "bitflags"
-version = "2.4.1"
+version = "2.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
+checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
 dependencies = [
  "serde",
 ]
@@ -595,14 +676,6 @@ dependencies = [
  "digest 0.10.7",
 ]

-[[package]]
-name = "blake2"
-version = "0.10.6"
-source = "git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e#1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e"
-dependencies = [
- "digest 0.10.7",
-]
-
 [[package]]
 name = "blake2-rfc_bellman_edition"
 version = "0.0.1"
@@ -614,10 +687,20 @@ dependencies = [
  "constant_time_eq",
 ]

+[[package]]
+name = "blake2_ce"
+version = "0.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90cef65f11dd09a6c58914148161dbf190e5dcc02c87ed2aa47b3b97d3e7ce76"
+dependencies = [
+ "digest 0.10.7",
+]
+
 [[package]]
 name = "blake2s_const"
-version = "0.6.0"
-source = "git+https://github.com/matter-labs/bellman?branch=dev#5520aa2274afe73d281373c92b007a2ecdebfbea"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f39d933cb38939f885001867874c65699c36f30f0c78aae9f4c9f01b3e4b306a"
 dependencies = [
  "arrayref",
  "arrayvec 0.5.2",
@@ -668,13 +751,15 @@ dependencies = [
  "clap 4.4.6",
  "serde_json",
  "tokio",
- "vlog",
  "zksync_block_reverter",
  "zksync_config",
+ "zksync_core_leftovers",
  "zksync_dal",
  "zksync_env_config",
  "zksync_object_store",
+ "zksync_protobuf_config",
  "zksync_types",
+ "zksync_vlog",
 ]

 [[package]]
 name = "boojum"
 version = "0.2.0"
-source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#4bcb11f0610302110ae8109af01d5b652191b2f6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f0c2cba247d620ff76123efb335401aa05ec5639551e6ef4e5f977c0809b5cb"
 dependencies = [
  "arrayvec 0.7.4",
  "bincode",
- "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "blake2 0.10.6",
  "const_format",
  "convert_case",
- "crossbeam",
+ "crossbeam 0.8.4",
  "crypto-bigint 0.5.3",
  "cs_derive",
  "derivative",
@@ -709,12 +795,12 @@ dependencies = [
  "lazy_static",
  "num-modular",
  "num_cpus",
- "pairing_ce 0.28.5 (git+https://github.com/matter-labs/pairing.git)",
+ "pairing_ce",
  "rand 0.8.5",
  "rayon",
  "serde",
  "sha2 0.10.8",
- "sha3 0.10.6",
+ "sha3_ce",
  "smallvec",
  "unroll",
 ]
@@ -736,7 +822,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0"
 dependencies = [
  "once_cell",
- "proc-macro-crate 2.0.1",
+ "proc-macro-crate 2.0.0",
  "proc-macro2 1.0.69",
  "quote 1.0.33",
  "syn 2.0.38",
 ]

 [[package]]
-name = "brotli"
-version = "3.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "516074a47ef4bce09577a3b379392300159ce5b1ba2e501ff1c819950066100f"
-dependencies = [
- "alloc-no-stdlib",
- "alloc-stdlib",
- "brotli-decompressor",
-]
-
-[[package]]
-name = "brotli-decompressor"
-version = "2.5.0"
+name = "build_html"
+version = "2.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da74e2b81409b1b743f8f0c62cc6254afefb8b8e50bbfe3735550f7aeefa3448"
-dependencies = [
- "alloc-no-stdlib",
- "alloc-stdlib",
-]
+checksum = "3108fe6fe7ac796fb7625bdde8fa2b67b5a7731496251ca57c7b8cadd78a16a1"

 [[package]]
 name = "bumpalo"
@@ -812,9 +883,15 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"

 [[package]]
 name = "bytes"
-version = "1.5.0"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
+
+[[package]]
+name = "bytesize"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223"
+checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc"

 [[package]]
 name = "bzip2-sys"
@@ -874,6 +951,12 @@ dependencies = [
  "libc",
 ]

+[[package]]
+name = "cesu8"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
+
 [[package]]
 name = "cexpr"
 version = "0.6.0"
@@ -980,106 +1063,115 @@ dependencies = [

 [[package]]
 name = "circuit_encodings"
-version = "0.1.40"
-source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#39665dffd576cff5007c80dd0e1b5334e230bd3b"
+version = "0.140.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b7f1168c8fbb45fc7704c1bcdbb65ebdcb019fc9bf1101a475904eff835632f7"
 dependencies = [
  "derivative",
  "serde",
- "zk_evm 1.4.0",
- "zkevm_circuits 1.4.0",
+ "zk_evm 0.140.0",
+ "zkevm_circuits 0.140.0",
 ]

 [[package]]
 name = "circuit_encodings"
-version = "0.1.41"
-source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#f7bd71fd4216e2c51ab7b09a95909fe48c75f35b"
+version = "0.141.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90b17a11dd3489daef314cbb07e1098e8e34a35a625fdca421b0012f4bb6cbd0"
 dependencies = [
  "derivative",
  "serde",
- "zk_evm 1.4.1",
- "zkevm_circuits 1.4.1",
+ "zk_evm 0.141.0",
+ "zkevm_circuits 0.141.0",
 ]

 [[package]]
 name = "circuit_encodings"
-version = "0.1.42"
-source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.2#012dcc678990c695f97e5dd1f136dfa8fe376c16"
+version = "0.142.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5df3af2244275a1270e2887b2f47625ec78dff14db8dd8a88f7ea1ea0781e48b"
 dependencies = [
  "derivative",
  "serde",
- "zk_evm 1.4.1",
- "zkevm_circuits 1.4.1",
+ "zk_evm 0.141.0",
+ "zkevm_circuits 0.141.0",
 ]

 [[package]]
 name = "circuit_encodings"
-version = "0.1.50"
-source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#a9b1c3a3cf46e683d6a27db33805d994ca8476ec"
+version = "0.150.2-rc.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4b69893ec5a2112430adaf8e29b52ea9ec4ef2d6663879f7cc279b4479a8880"
 dependencies = [
  "derivative",
  "serde",
- "zk_evm 1.5.0",
- "zkevm_circuits 1.5.0",
+ "zk_evm 0.150.0",
+ "zkevm_circuits 0.150.0",
 ]

 [[package]]
 name = "circuit_sequencer_api"
-version = "0.1.0"
-source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.3.3#aba8f2a32767b79838aca7d7d00d9d23144df32f"
+version = "0.133.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a87dc7bee6630d4954ac7982eb77e2007476662250cf18e5c460bbc5ee435f1"
 dependencies = [
  "bellman_ce",
  "derivative",
  "rayon",
  "serde",
- "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)",
+ "zk_evm 0.133.0",
 ]

 [[package]]
 name = "circuit_sequencer_api"
-version = "0.1.40"
-source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#39665dffd576cff5007c80dd0e1b5334e230bd3b"
+version = "0.140.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b5138e6524c73e6d49fc1d0822b26e62a8d78b2c07e4e1c56061a447c10bec0"
 dependencies = [
  "bellman_ce",
- "circuit_encodings 0.1.40",
+ "circuit_encodings 0.140.0",
  "derivative",
  "rayon",
  "serde",
- "zk_evm 1.4.0",
+ "zk_evm 0.140.0",
 ]

 [[package]]
 name = "circuit_sequencer_api"
-version = "0.1.41"
-source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#f7bd71fd4216e2c51ab7b09a95909fe48c75f35b"
+version = "0.141.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ff871d625d002eb7f27394a239c0b19d8449adf1b9ca7805ebb43c8cf0810b51"
 dependencies = [
  "bellman_ce",
- "circuit_encodings 0.1.41",
+ "circuit_encodings 0.141.0",
  "derivative",
  "rayon",
  "serde",
- "zk_evm 1.4.1",
+ "zk_evm 0.141.0",
 ]

 [[package]]
 name = "circuit_sequencer_api"
-version = "0.1.42"
-source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.2#012dcc678990c695f97e5dd1f136dfa8fe376c16"
+version = "0.142.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1d861a7a9b8df9389c63092985fc993c46954771da86462d7cab8cbf55a6497"
 dependencies = [
  "bellman_ce",
- "circuit_encodings 0.1.42",
+ "circuit_encodings 0.142.0",
  "derivative",
  "rayon",
  "serde",
- "zk_evm 1.4.1",
+ "zk_evm 0.141.0",
 ]

 [[package]]
 name = "circuit_sequencer_api"
-version = "0.1.50"
-source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#a9b1c3a3cf46e683d6a27db33805d994ca8476ec"
+version = "0.150.2-rc.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "121470724079938b8f878e8a95f757d814624795c9a5ca69dd9dd782035fbe39"
 dependencies = [
  "bellman_ce",
- "circuit_encodings 0.1.50",
+ "circuit_encodings 0.150.2-rc.1",
  "derivative",
  "rayon",
  "serde",
@@ -1173,19 +1265,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961"

 [[package]]
-name = "codegen"
-version = "0.1.0"
-source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#82f96b7156551087f1c9bfe4f0ea68845b6debfc"
+name = "cmake"
+version = "0.1.50"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130"
 dependencies = [
- "ethereum-types",
- "franklin-crypto",
- "handlebars",
- "hex",
- "paste",
- "rescue_poseidon",
- "serde",
- "serde_derive",
- "serde_json",
+ "cc",
 ]

 [[package]]
@@ -1204,19 +1289,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"

 [[package]]
-name = "compile-fmt"
-version = "0.1.0"
+name = "combine"
+version = "4.6.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bed69047ed42e52c7e38d6421eeb8ceefb4f2a2b52eed59137f7bad7908f6800"
+checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
+dependencies = [
+ "bytes",
+ "memchr",
+]

 [[package]]
-name = "concurrent-queue"
-version = "2.4.0"
+name = "compile-fmt"
+version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363"
-dependencies = [
- "crossbeam-utils 0.8.16",
-]
+checksum = "bed69047ed42e52c7e38d6421eeb8ceefb4f2a2b52eed59137f7bad7908f6800"

 [[package]]
 name = "console"
@@ -1264,15 +1350,18 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"

 [[package]]
 name = "convert_case"
-version = "0.4.0"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
+checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca"
+dependencies = [
+ "unicode-segmentation",
+]

 [[package]]
 name = "core-foundation"
-version = "0.9.3"
+version = "0.9.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146"
+checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
 dependencies = [
  "core-foundation-sys",
  "libc",
 ]

 [[package]]
 name = "core-foundation-sys"
-version = "0.8.4"
+version = "0.8.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
+checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f"

 [[package]]
 name = "cpufeatures"
@@ -1367,6 +1456,19 @@ dependencies = [
  "crossbeam-utils 0.7.2",
 ]

+[[package]]
+name = "crossbeam"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
+dependencies = [
+ "crossbeam-channel 0.5.13",
+ "crossbeam-deque 0.8.5",
+ "crossbeam-epoch 0.9.18",
+ "crossbeam-queue 0.3.11",
+ "crossbeam-utils 0.8.20",
+]
+
 [[package]]
 name = "crossbeam-channel"
 version = "0.4.4"
@@ -1379,12 +1481,11 @@ dependencies = [

 [[package]]
 name = "crossbeam-channel"
-version = "0.5.8"
+version = "0.5.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
+checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
 dependencies = [
- "cfg-if 1.0.0",
- "crossbeam-utils 0.8.16",
+ "crossbeam-utils 0.8.20",
 ]

 [[package]]
@@ -1400,13 +1501,12 @@ dependencies = [

 [[package]]
 name = "crossbeam-deque"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
+checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
 dependencies = [
- "cfg-if 1.0.0",
- "crossbeam-epoch 0.9.15",
- "crossbeam-utils 0.8.16",
+ "crossbeam-epoch 0.9.18",
+ "crossbeam-utils 0.8.20",
 ]

 [[package]]
@@ -1420,21 +1520,17 @@ dependencies = [
  "crossbeam-utils 0.7.2",
  "lazy_static",
  "maybe-uninit",
- "memoffset 0.5.6",
+ "memoffset",
  "scopeguard",
 ]

 [[package]]
 name = "crossbeam-epoch"
-version = "0.9.15"
+version = "0.9.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
 dependencies = [
- "autocfg",
- "cfg-if 1.0.0",
- "crossbeam-utils 0.8.16",
- "memoffset 0.9.0",
- "scopeguard",
+ "crossbeam-utils 0.8.20",
 ]

 [[package]]
@@ -1450,12 +1546,11 @@ dependencies = [

 [[package]]
 name = "crossbeam-queue"
-version = "0.3.8"
+version = "0.3.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add"
+checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"
 dependencies = [
- "cfg-if 1.0.0",
- "crossbeam-utils 0.8.16",
+ "crossbeam-utils 0.8.20",
 ]

 [[package]]
@@ -1471,12 +1566,9 @@ dependencies = [

 [[package]]
 name = "crossbeam-utils"
-version = "0.8.16"
+version = "0.8.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
-dependencies = [
- "cfg-if 1.0.0",
-]
+checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"

 [[package]]
 name = "crunchy"
@@ -1531,8 +1623,9 @@ dependencies = [

 [[package]]
 name = "cs_derive"
-version = "0.1.0"
-source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#4bcb11f0610302110ae8109af01d5b652191b2f6"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "faa0b8f9fdb5c91dcd5569cc7cbc11f514fd784a34988ead8455db0db2cfc1c7"
 dependencies = [
  "proc-macro-error",
  "proc-macro2 1.0.69",
@@ -1746,6 +1839,12 @@ version = "1.0.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653"

+[[package]]
+name = "dunce"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b"
+
 [[package]]
 name = "ecdsa"
 version = "0.14.8"
@@ -1766,7 +1865,7 @@ checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca"
 dependencies = [
  "der 0.7.8",
  "digest 0.10.7",
- "elliptic-curve 0.13.7",
+ "elliptic-curve 0.13.8",
  "rfc6979 0.4.0",
  "signature 2.2.0",
  "spki 0.7.2",
@@ -1827,9 +1926,9 @@ dependencies = [

 [[package]]
 name = "elliptic-curve"
-version = "0.13.7"
+version = "0.13.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9775b22bc152ad86a0cf23f0f348b884b26add12bf741e7ffc4d4ab2ab4d205"
+checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47"
 dependencies = [
  "base16ct 0.2.0",
  "crypto-bigint 0.5.3",
@@ -1995,27 +2094,6 @@ version = "2.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"

-[[package]]
-name = "event-listener"
-version = "4.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae"
-dependencies = [
- "concurrent-queue",
- "parking",
- "pin-project-lite",
-]
-
-[[package]]
-name = "event-listener-strategy"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3"
-dependencies = [
- "event-listener 4.0.0",
- "pin-project-lite",
-]
-
 [[package]]
 name = "fastrand"
 version = "2.0.1"
@@ -2061,7 +2139,7 @@ version = "0.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b96fbccd88dbb1fac4ee4a07c2fcc4ca719a74ffbd9d2b9d41d8c8eb073d8b20"
 dependencies = [
- "num-bigint 0.4.4",
+ "num-bigint 0.4.6",
  "num-integer",
  "num-traits",
  "proc-macro2 1.0.69",
@@ -2136,7 +2214,7 @@ checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181"
 dependencies = [
  "futures-core",
  "futures-sink",
- "spin 0.9.8",
+ "spin",
 ]

 [[package]]
@@ -2170,15 +2248,26 @@ dependencies = [
 ]

 [[package]]
-name = "franklin-crypto"
-version = "0.0.5"
-source = "git+https://github.com/matter-labs/franklin-crypto?branch=dev#5695d07c7bc604c2c39a27712ffac171d39ee1ed"
+name = "fraction"
+version = "0.15.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f158e3ff0a1b334408dc9fb811cd99b446986f4d8b741bb08f9df1604085ae7"
 dependencies = [
- "arr_macro",
- "bellman_ce",
- "bit-vec",
- "blake2 0.9.2",
- "blake2-rfc_bellman_edition",
+ "lazy_static",
+ "num",
+]
+
+[[package]]
+name = "franklin-crypto"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "178bca54fc449a6f4cb45321ed9d769353143ac7ef314ea310f3a0c61bed2da2"
+dependencies = [
+ "arr_macro",
+ "bellman_ce",
+ "bit-vec",
+ "blake2 0.9.2",
+ "blake2-rfc_bellman_edition",
  "blake2s_simd",
  "byteorder",
  "digest 0.9.0",
@@ -2186,7 +2275,7 @@ dependencies = [
  "indexmap 1.9.3",
  "itertools 0.10.5",
  "lazy_static",
- "num-bigint 0.4.4",
+ "num-bigint 0.4.6",
  "num-derive",
  "num-integer",
  "num-traits",
@@ -2199,6 +2288,12 @@ dependencies = [
  "tiny-keccak 1.5.0",
 ]

+[[package]]
+name = "fs_extra"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"
+
 [[package]]
 name = "fuchsia-cprng"
 version = "0.1.1"
@@ -2371,8 +2466,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
 dependencies = [
  "cfg-if 1.0.0",
+ "js-sys",
  "libc",
  "wasi 0.11.0+wasi-snapshot-preview1",
+ "wasm-bindgen",
 ]

 [[package]]
@@ -2407,7 +2504,7 @@ dependencies = [
  "futures-core",
  "futures-sink",
  "gloo-utils",
- "http",
+ "http 0.2.9",
  "js-sys",
  "pin-project",
  "serde",
@@ -2445,9 +2542,9 @@ dependencies = [

 [[package]]
 name = "google-cloud-auth"
-version = "0.13.0"
+version = "0.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af1087f1fbd2dd3f58c17c7574ddd99cd61cbbbc2c4dc81114b8687209b196cb"
+checksum = "1112c453c2e155b3e683204ffff52bcc6d6495d04b68d9e90cd24161270c5058"
 dependencies = [
  "async-trait",
  "base64 0.21.5",
@@ -2455,7 +2552,7 @@ dependencies = [
  "google-cloud-token",
  "home",
  "jsonwebtoken",
- "reqwest",
+ "reqwest 0.12.5",
  "serde",
  "serde_json",
  "thiserror",
@@ -2467,21 +2564,22 @@ dependencies = [

 [[package]]
 name = "google-cloud-metadata"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc279bfb50487d7bcd900e8688406475fc750fe474a835b2ab9ade9eb1fc90e2"
+checksum = "04f945a208886a13d07636f38fb978da371d0abc3e34bad338124b9f8c135a8f"
 dependencies = [
- "reqwest",
+ "reqwest 0.12.5",
  "thiserror",
  "tokio",
 ]

 [[package]]
 name = "google-cloud-storage"
-version = "0.15.0"
+version = "0.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac04b29849ebdeb9fb008988cc1c4d1f0c9d121b4c7f1ddeb8061df124580e93"
+checksum = "cc0c5b7469142d91bd77959e69375bede324a5def07c7f29aa0d582586cba305"
 dependencies = [
+ "anyhow",
  "async-stream",
  "async-trait",
  "base64 0.21.5",
@@ -2495,8 +2593,9 @@ dependencies = [
  "percent-encoding",
  "pkcs8 0.10.2",
  "regex",
- "reqwest",
- "ring 0.17.7",
+ "reqwest 0.12.5",
+ "reqwest-middleware",
+ "ring",
  "serde",
  "serde_json",
  "sha2 0.10.8",
@@ -2509,9 +2608,9 @@ dependencies = [

 [[package]]
 name = "google-cloud-token"
-version = "0.1.1"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fcd62eb34e3de2f085bcc33a09c3e17c4f65650f36d53eb328b00d63bcb536a"
+checksum = "8f49c12ba8b21d128a2ce8585955246977fbce4415f680ebf9199b6f9d6d725f"
 dependencies = [
  "async-trait",
 ]
@@ -2528,7 +2627,7 @@ dependencies = [
  "no-std-compat",
  "nonzero_ext",
  "parking_lot",
- "quanta 0.9.3",
+ "quanta",
  "rand 0.8.5",
  "smallvec",
 ]
@@ -2566,7 +2665,26 @@ dependencies = [
  "futures-core",
  "futures-sink",
  "futures-util",
- "http",
+ "http 0.2.9",
+ "indexmap 2.1.0",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "h2"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http 1.1.0",
  "indexmap 2.1.0",
  "slab",
  "tokio",
  "tokio-util",
  "tracing",
 ]
@@ -2582,16 +2700,16 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"

 [[package]]
 name = "handlebars"
-version = "5.1.1"
+version = "3.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c73166c591e67fb4bf9bc04011b4e35f12e89fe8d676193aa263df065955a379"
+checksum = "4498fc115fa7d34de968184e473529abb40eeb6be8bc5f7faba3d08c316cb3e3"
 dependencies = [
  "log",
  "pest",
  "pest_derive",
+ "quick-error",
  "serde",
  "serde_json",
- "thiserror",
 ]

 [[package]]
@@ -2603,15 +2721,6 @@ dependencies = [
  "ahash 0.7.7",
 ]

-[[package]]
-name = "hashbrown"
-version = "0.13.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038"
-dependencies = [
- "ahash 0.8.7",
-]
-
 [[package]]
 name = "hashbrown"
 version = "0.14.2"
@@ -2631,16 +2740,6 @@ dependencies = [
  "hashbrown 0.14.2",
 ]

-[[package]]
-name = "hdrhistogram"
-version = "7.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f19b9f54f7c7f55e31401bb647626ce0cf0f67b0004982ce815b3ee72a02aa8"
-dependencies = [
- "byteorder",
- "num-traits",
-]
-
 [[package]]
 name = "heck"
 version = "0.3.3"
@@ -2735,6 +2834,17 @@ dependencies = [
  "itoa",
 ]

+[[package]]
+name = "http"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
 [[package]]
 name = "http-body"
 version = "0.4.6"
@@ -2742,15 +2852,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
 dependencies = [
  "bytes",
- "http",
+ "http 0.2.9",
  "pin-project-lite",
 ]

 [[package]]
-name = "http-range-header"
-version = "0.3.1"
+name = "http-body"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643"
+dependencies = [
+ "bytes",
+ "http 1.1.0",
+]
+
+[[package]]
+name = "http-body-util"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f"
+checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
+dependencies = [
+ "bytes",
+ "futures-util",
+ "http 1.1.0",
+ "http-body 1.0.0",
+ "pin-project-lite",
+]

 [[package]]
 name = "httparse"
@@ -2774,9 +2901,9 @@ dependencies = [
  "futures-channel",
  "futures-core",
  "futures-util",
- "h2",
- "http",
- "http-body",
+ "h2 0.3.26",
+ "http 0.2.9",
+ "http-body 0.4.6",
  "httparse",
  "httpdate",
  "itoa",
@@ -2788,20 +2915,43 @@ dependencies = [
  "want",
 ]

+[[package]]
+name = "hyper"
+version = "1.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "h2 0.4.5",
+ "http 1.1.0",
+ "http-body 1.0.0",
+ "httparse",
+ "httpdate",
+ "itoa",
+ "pin-project-lite",
+ "smallvec",
+ "tokio",
+ "want",
+]
+
 [[package]]
 name = "hyper-rustls"
-version = "0.24.1"
+version = "0.27.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97"
+checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155"
 dependencies = [
  "futures-util",
- "http",
- "hyper",
+ "http 1.1.0",
+ "hyper 1.3.1",
+ "hyper-util",
  "log",
- "rustls 0.21.11",
- "rustls-native-certs 0.6.3",
+ "rustls",
+ "rustls-pki-types",
  "tokio",
- "tokio-rustls 0.24.1",
+ "tokio-rustls",
+ "tower-service",
 ]

 [[package]]
@@ -2810,7 +2960,7 @@ version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
 dependencies = [
- "hyper",
+ "hyper 0.14.29",
  "pin-project-lite",
  "tokio",
  "tokio-io-timeout",
@@ -2823,10 +2973,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
 dependencies = [
  "bytes",
- "hyper",
+ "hyper 0.14.29",
+ "native-tls",
+ "tokio",
+ "tokio-native-tls",
+]
+
+[[package]]
+name = "hyper-tls"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0"
+dependencies = [
+ "bytes",
+ "http-body-util",
+ "hyper 1.3.1",
+ "hyper-util",
  "native-tls",
  "tokio",
  "tokio-native-tls",
+ "tower-service",
+]
+
+[[package]]
+name = "hyper-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "http 1.1.0",
+ "http-body 1.0.0",
+ "hyper 1.3.1",
+ "pin-project-lite",
+ "socket2",
+ "tokio",
+ "tower",
+ "tower-service",
+ "tracing",
+]

 [[package]]
@@ -2985,20 +3171,19 @@ dependencies = [
 ]

 [[package]]
-name = "iri-string"
-version = "0.7.0"
+name = "itertools"
+version = "0.10.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21859b667d66a4c1dacd9df0863b3efb65785474255face87f5bca39dd8407c0"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
 dependencies = [
- "memchr",
- "serde",
+ "either",
 ]

 [[package]]
 name = "itertools"
-version = "0.10.5"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57"
 dependencies = [
  "either",
 ]
@@ -3018,6 +3203,26 @@ version = "1.0.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"

+[[package]]
+name = "jni"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec"
+dependencies = [
+ "cesu8",
+ "combine",
+ "jni-sys",
+ "log",
+ "thiserror",
+ "walkdir",
+]
+
+[[package]]
+name = "jni-sys"
+version =
"0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.27" @@ -3038,9 +3243,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9579d0ca9fb30da026bac2f0f7d9576ec93489aeb7cd4971dd5b4617d82c79b2" +checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -3056,41 +3261,44 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9f9ed46590a8d5681975f126e22531698211b926129a40a2db47cbca429220" +checksum = "08163edd8bcc466c33d79e10f695cdc98c00d1e6ddfb95cec41b6b0279dd5432" dependencies = [ + "base64 0.22.1", "futures-channel", "futures-util", "gloo-net", - "http", + "http 1.1.0", "jsonrpsee-core", "pin-project", - "rustls-native-certs 0.7.0", + "rustls", "rustls-pki-types", + "rustls-platform-verifier", "soketto", "thiserror", "tokio", - "tokio-rustls 0.25.0", + "tokio-rustls", "tokio-util", "tracing", "url", - "webpki-roots", ] [[package]] name = "jsonrpsee-core" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "776d009e2f591b78c038e0d053a796f94575d66ca4e77dd84bfc5e81419e436c" +checksum = "79712302e737d23ca0daa178e752c9334846b08321d439fd89af9a384f8c830b" dependencies = [ "anyhow", - "async-lock", "async-trait", "beef", + "bytes", "futures-timer", "futures-util", - "hyper", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", "jsonrpsee-types", "parking_lot", "pin-project", @@ -3107,15 +3315,20 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b7de9f3219d95985eb77fd03194d7c1b56c19bce1abfcc9d07462574b15572" +checksum = "2d90064e04fb9d7282b1c71044ea94d0bbc6eff5621c66f1a0bce9e9de7cf3ac" dependencies = [ "async-trait", - "hyper", + "base64 0.22.1", + "http-body 1.0.0", + "hyper 1.3.1", "hyper-rustls", + "hyper-util", "jsonrpsee-core", "jsonrpsee-types", + "rustls", + "rustls-platform-verifier", "serde", "serde_json", "thiserror", @@ -3127,26 +3340,30 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d94b7505034e2737e688e1153bf81e6f93ad296695c43958d6da2e4321f0a990" +checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ - "heck 0.4.1", - "proc-macro-crate 2.0.1", + "heck 0.5.0", + "proc-macro-crate 3.1.0", "proc-macro2 1.0.69", "quote 1.0.33", - "syn 1.0.109", + "syn 2.0.38", ] [[package]] name = "jsonrpsee-server" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc7c6d1a2c58f6135810284a390d9f823d0f508db74cd914d8237802de80f98" +checksum = "654afab2e92e5d88ebd8a39d6074483f3f2bfdf91c5ac57fe285e7127cdd4f51" dependencies = [ + "anyhow", "futures-util", - "http", - "hyper", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-util", "jsonrpsee-core", "jsonrpsee-types", "pin-project", @@ -3164,12 +3381,12 @@ dependencies = [ [[package]] name = "jsonrpsee-types" 
-version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3266dfb045c9174b24c77c2dfe0084914bb23a6b2597d70c9dc6018392e1cd1b" +checksum = "d9c465fbe385238e861fdc4d1c85e04ada6c1fd246161d26385c1b311724d2af" dependencies = [ - "anyhow", "beef", + "http 1.1.0", "serde", "serde_json", "thiserror", @@ -3177,9 +3394,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30f36d27503d0efc0355c1630b74ecfb367050847bf7241a0ed75fab6dfa96c0" +checksum = "4727ac037f834c6f04c0912cada7532dbddb54e92fbc64e33d6cb8c24af313c9" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -3188,11 +3405,11 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "073c077471e89c4b511fa88b3df9a0f0abdf4a0a2e6683dd2ab36893af87bb2d" +checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" dependencies = [ - "http", + "http 1.1.0", "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-types", @@ -3201,13 +3418,14 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.3.0" +version = "9.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" dependencies = [ "base64 0.21.5", + "js-sys", "pem", - "ring 0.16.20", + "ring", "serde", "serde_json", "simple_asn1", @@ -3227,13 +3445,13 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f01b677d82ef7a676aa37e099defd83a28e15687112cafdd112d60236b6115b" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if 1.0.0", "ecdsa 0.16.9", - "elliptic-curve 0.13.7", + "elliptic-curve 0.13.8", "once_cell", "sha2 0.10.8", "signature 2.2.0", @@ -3248,29 +3466,13 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "kzg" -version = "0.1.50" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#a9b1c3a3cf46e683d6a27db33805d994ca8476ec" -dependencies = [ - "boojum", - "derivative", - "hex", - "once_cell", - "rayon", - "serde", - "serde_json", - "serde_with", - "zkevm_circuits 1.5.0", -] - [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin 0.5.2", + "spin", ] [[package]] @@ -3313,7 +3515,7 @@ version = "0.11.0+8.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" dependencies = [ - "bindgen", + "bindgen 0.65.1", "bzip2-sys", "cc", "glob", @@ -3388,10 +3590,9 @@ dependencies = [ "hex", "num", "once_cell", - "prometheus_exporter", "rand 0.8.5", "regex", - "reqwest", + "reqwest 0.12.5", "serde", "serde_json", "static_assertions", @@ -3399,7 +3600,6 @@ dependencies = [ "tokio", "tracing", "vise", - "vlog", "zksync_config", "zksync_contracts", "zksync_eth_client", @@ -3407,6 +3607,7 @@ dependencies = [ "zksync_system_constants", 
"zksync_types", "zksync_utils", + "zksync_vlog", "zksync_web3_decl", ] @@ -3483,15 +3684,6 @@ dependencies = [ "libc", ] -[[package]] -name = "mach2" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" -dependencies = [ - "libc", -] - [[package]] name = "match_cfg" version = "0.1.0" @@ -3544,15 +3736,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - [[package]] name = "merkle_tree_consistency_checker" version = "0.1.0" @@ -3560,67 +3743,12 @@ dependencies = [ "anyhow", "clap 4.4.6", "tracing", - "vlog", "zksync_config", "zksync_env_config", "zksync_merkle_tree", "zksync_storage", "zksync_types", -] - -[[package]] -name = "metrics" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" -dependencies = [ - "ahash 0.8.7", - "metrics-macros", - "portable-atomic", -] - -[[package]] -name = "metrics-exporter-prometheus" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" -dependencies = [ - "base64 0.21.5", - "hyper", - "indexmap 1.9.3", - "ipnet", - "metrics", - "metrics-util", - "quanta 0.11.1", - "thiserror", - "tokio", - "tracing", -] - -[[package]] -name = "metrics-macros" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" -dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", -] - -[[package]] -name = "metrics-util" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" -dependencies = [ - "crossbeam-epoch 0.9.15", - "crossbeam-utils 0.8.16", - "hashbrown 0.13.1", - "metrics", - "num_cpus", - "quanta 0.11.1", - "sketches-ddsketch", + "zksync_vlog", ] [[package]] @@ -3668,8 +3796,8 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23e0b72e7c9042467008b10279fc732326bd605459ae03bda88825909dd19b56" dependencies = [ - "crossbeam-channel 0.5.8", - "crossbeam-utils 0.8.16", + "crossbeam-channel 0.5.13", + "crossbeam-utils 0.8.20", "dashmap", "skeptic", "smallvec", @@ -3703,6 +3831,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + [[package]] name = "multimap" version = "0.8.3" @@ -3710,45 +3844,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] -name = "multivm" -version = "0.1.0" -dependencies = [ - "anyhow", - "circuit_sequencer_api 0.1.0", - "circuit_sequencer_api 0.1.40", - "circuit_sequencer_api 0.1.41", - "circuit_sequencer_api 0.1.42", - "circuit_sequencer_api 0.1.50", - "ethabi", - "hex", - "itertools 0.10.5", - "once_cell", - "pretty_assertions", - "serde", - "thiserror", - "tokio", - "tracing", - "vise", - "vm2", - "zk_evm 1.3.1", - "zk_evm 1.3.3 
(git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zk_evm 1.4.0", - "zk_evm 1.4.1", - "zk_evm 1.5.0", - "zksync_contracts", - "zksync_eth_signer", - "zksync_state", - "zksync_system_constants", - "zksync_test_account", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "native-tls" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", @@ -3768,7 +3867,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "cfg-if 1.0.0", "libc", ] @@ -3813,11 +3912,11 @@ dependencies = [ [[package]] name = "num" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ - "num-bigint 0.4.4", + "num-bigint 0.4.6", "num-complex", "num-integer", "num-iter", @@ -3838,11 +3937,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", "serde", @@ -3867,9 +3965,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", "serde", @@ -3888,19 +3986,18 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-iter" -version = "0.1.43" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -3919,12 +4016,11 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "autocfg", - "num-bigint 0.4.4", + "num-bigint 0.4.6", "num-integer", "num-traits", "serde", @@ -3932,9 +4028,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -3986,7 +4082,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 2.0.1", + "proc-macro-crate 3.1.0", "proc-macro2 1.0.69", "quote 1.0.33", "syn 2.0.38", @@ -4021,11 +4117,11 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.57" +version = "0.10.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "cfg-if 1.0.0", "foreign-types", "libc", @@ -4053,9 +4149,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.93" +version = "0.9.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" dependencies = [ "cc", "libc", @@ -4081,9 +4177,9 @@ checksum = "c7594ec0e11d8e33faf03530a4c49af7064ebba81c1480e01be67d90b356508b" dependencies = [ "async-trait", "bytes", - "http", + "http 0.2.9", "opentelemetry_api", - "reqwest", + "reqwest 0.11.22", ] [[package]] @@ -4094,14 +4190,14 @@ checksum = "7e5e5a5c4135864099f3faafbe939eb4d7f9b80ebf68a8448da961b32a7c1275" dependencies = [ "async-trait", "futures-core", - "http", + "http 0.2.9", "opentelemetry-http", "opentelemetry-proto", "opentelemetry-semantic-conventions", "opentelemetry_api", "opentelemetry_sdk", "prost 0.11.9", - "reqwest", + "reqwest 0.11.22", "thiserror", "tokio", "tonic", @@ -4151,7 +4247,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa8e705a0612d48139799fcbaba0d4a90f06277153e43dd2bdc16c6f0edd8026" dependencies = [ "async-trait", - "crossbeam-channel 0.5.8", + "crossbeam-channel 0.5.13", "futures-channel", "futures-executor", "futures-util", @@ -4215,40 +4311,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ "ecdsa 0.16.9", - "elliptic-curve 0.13.7", + "elliptic-curve 0.13.8", "primeorder", "sha2 0.10.8", ] [[package]] name = "pairing_ce" -version = "0.28.5" +version = "0.28.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db007b21259660d025918e653508f03050bf23fb96a88601f9936329faadc597" -dependencies = [ - "byteorder", - "cfg-if 1.0.0", - "ff_ce", - "rand 0.4.6", - "serde", -] - -[[package]] -name = "pairing_ce" -version = "0.28.5" -source = "git+https://github.com/matter-labs/pairing.git?rev=d24f2c5871089c4cd4f54c0ca266bb9fef6115eb#d24f2c5871089c4cd4f54c0ca266bb9fef6115eb" -dependencies = [ - "byteorder", - "cfg-if 1.0.0", - "ff_ce", - "rand 0.4.6", - "serde", -] - -[[package]] -name = "pairing_ce" -version = "0.28.5" -source = "git+https://github.com/matter-labs/pairing.git#d24f2c5871089c4cd4f54c0ca266bb9fef6115eb" +checksum = "843b5b6fb63f00460f611dbc87a50bbbb745f0dfe5cbf67ca89299c79098640e" dependencies = [ "byteorder", "cfg-if 1.0.0", @@ -4283,12 +4355,6 @@ dependencies = [ "syn 
1.0.109", ] -[[package]] -name = "parking" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" - [[package]] name = "parking_lot" version = "0.12.1" @@ -4326,11 +4392,12 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.1.1" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.13.1", + "base64 0.22.1", + "serde", ] [[package]] @@ -4523,12 +4590,6 @@ dependencies = [ "universal-hash", ] -[[package]] -name = "portable-atomic" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b559898e0b4931ed2d3b959ab0c2da4d99cc644c4b0b1a35b4d344027f474023" - [[package]] name = "powerfmt" version = "0.2.0" @@ -4567,7 +4628,7 @@ version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" dependencies = [ - "elliptic-curve 0.13.7", + "elliptic-curve 0.13.8", ] [[package]] @@ -4595,14 +4656,22 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" dependencies = [ - "toml_datetime", "toml_edit 0.20.2", ] +[[package]] +name = "proc-macro-crate" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +dependencies = [ + "toml_edit 0.21.1", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -4674,18 +4743,6 @@ dependencies = [ "syn 2.0.38", ] -[[package]] -name = "prometheus_exporter" -version = "0.1.0" -dependencies = [ - "anyhow", - "metrics", - "metrics-exporter-prometheus", - "tokio", - "vise", - "vise-exporter", -] - [[package]] name = "prost" version = "0.11.9" @@ -4714,7 +4771,7 @@ checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" dependencies = [ "bytes", "heck 0.4.1", - "itertools 0.10.5", + "itertools 0.11.0", "log", "multimap", "once_cell", @@ -4748,7 +4805,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" dependencies = [ "anyhow", - "itertools 0.10.5", + "itertools 0.11.0", "proc-macro2 1.0.69", "quote 1.0.33", "syn 2.0.38", @@ -4806,16 +4863,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "prover_dal" -version = "0.1.0" -dependencies = [ - "sqlx", - "strum", - "zksync_basic_types", - "zksync_db_connection", -] - [[package]] name = "ptr_meta" version = "0.1.4" @@ -4853,7 +4900,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" dependencies = [ - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.20", "libc", "mach", "once_cell", @@ -4864,20 +4911,10 @@ dependencies = [ ] [[package]] -name = "quanta" -version = "0.11.1" +name = "quick-error" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" -dependencies = [ - "crossbeam-utils 0.8.16", - "libc", - "mach2", - "once_cell", - "raw-cpuid", - "wasi 0.11.0+wasi-snapshot-preview1", - "web-sys", - "winapi", -] +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" [[package]] name = "quick-protobuf" @@ -4990,9 +5027,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -5000,12 +5037,12 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-deque 0.8.3", - "crossbeam-utils 0.8.16", + "crossbeam-deque 0.8.5", + "crossbeam-utils 0.8.20", ] [[package]] @@ -5099,11 +5136,53 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-tls", + "h2 0.3.26", + "http 0.2.9", + "http-body 0.4.6", + "hyper 0.14.29", + "hyper-tls 0.5.0", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "system-configuration", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg 0.50.0", +] + +[[package]] +name = "reqwest" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +dependencies = [ + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-rustls", + "hyper-tls 0.6.0", + "hyper-util", "ipnet", "js-sys", "log", @@ -5113,9 +5192,11 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", + "sync_wrapper 1.0.1", "system-configuration", "tokio", "tokio-native-tls", @@ -5126,17 +5207,33 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "winreg", + "winreg 0.52.0", +] + +[[package]] +name = "reqwest-middleware" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39346a33ddfe6be00cbc17a34ce996818b97b230b87229f10114693becca1268" +dependencies = [ + "anyhow", + "async-trait", + "http 1.1.0", + "reqwest 0.12.5", + "serde", + "thiserror", + "tower-service", ] [[package]] name = "rescue_poseidon" version = "0.4.1" -source = "git+https://github.com/matter-labs/rescue-poseidon#d059b5042df5ed80e151f05751410b524a54d16c" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ada2124f92cf32b813e50f6f7d9e92f05addc321edb8b68f9b4e2bb6e0d5af8b" dependencies = [ "addchain", "arrayvec 0.7.4", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2 0.10.6", "byteorder", "franklin-crypto", "num-bigint 0.3.3", @@ -5170,21 +5267,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ring" -version = "0.16.20" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] - [[package]] name = "ring" version = "0.17.7" @@ -5194,8 +5276,8 @@ dependencies = [ "cc", "getrandom", "libc", - "spin 0.9.8", - "untrusted 0.9.0", + "spin", + "untrusted", "windows-sys 0.48.0", ] @@ -5323,7 +5405,7 @@ version = "0.38.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67ce50cb2e16c2903e30d1cbccfd8387a74b9d4c938b6a4c5ec6cc7556f7a8a0" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -5332,42 +5414,20 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" -dependencies = [ - "log", - "ring 0.17.7", - "rustls-webpki 0.101.7", - "sct", -] - -[[package]] -name = "rustls" -version = "0.22.4" +version = "0.23.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" dependencies = [ + "aws-lc-rs", "log", - "ring 0.17.7", + "once_cell", + "ring", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki", "subtle", "zeroize", ] -[[package]] -name = "rustls-native-certs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" -dependencies = [ - "openssl-probe", - "rustls-pemfile 1.0.3", - "schannel", - "security-framework", -] - [[package]] name = "rustls-native-certs" version = "0.7.0" @@ -5375,21 +5435,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", - "rustls-pemfile 2.0.0", + "rustls-pemfile", "rustls-pki-types", "schannel", "security-framework", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" -dependencies = [ - "base64 0.21.5", -] - [[package]] name = "rustls-pemfile" version = "2.0.0" @@ -5402,29 +5453,47 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] -name = "rustls-webpki" -version = "0.101.7" +name = "rustls-platform-verifier" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "3e3beb939bcd33c269f4bf946cc829fcd336370267c4a927ac0399c84a3151a1" dependencies = [ - "ring 0.17.7", - "untrusted 0.9.0", + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-roots", + "winapi", ] +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" + [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ - "ring 0.17.7", + "aws-lc-rs", + "ring", "rustls-pki-types", - "untrusted 0.9.0", + "untrusted", ] [[package]] @@ -5463,16 +5532,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" -dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", -] - [[package]] name = "seahash" version = "4.1.0" @@ -5536,22 +5595,23 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", + "num-bigint 0.4.6", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -5580,7 +5640,7 @@ checksum = "0097a48cd1999d983909f07cb03b15241c5af29e5e679379efac1c06296abecc" dependencies = [ "httpdate", "native-tls", - "reqwest", + "reqwest 0.11.22", "sentry-backtrace", "sentry-contexts", "sentry-core", @@ -5786,19 +5846,6 @@ dependencies = [ "unsafe-libyaml", ] -[[package]] -name = "sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha1" version = "0.10.6" @@ -5825,8 +5872,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" -source = "git+https://github.com/RustCrypto/hashes.git?rev=1731ced4a116d61ba9dc6ee6d0f38fb8102e357a#1731ced4a116d61ba9dc6ee6d0f38fb8102e357a" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -5834,10 +5882,10 @@ dependencies = [ ] [[package]] -name = "sha2" -version = "0.10.8" +name = "sha2_ce" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "eca2daa77078f4ddff27e75c4bf59e4c2697525f56dbb3c842d34a5d1f2b04a2" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -5858,18 +5906,19 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.6" -source = 
"git+https://github.com/RustCrypto/hashes.git?rev=7a187e934c1f6c68e4b4e5cf37541b7a0d64d303#7a187e934c1f6c68e4b4e5cf37541b7a0d64d303" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ "digest 0.10.7", "keccak", ] [[package]] -name = "sha3" -version = "0.10.8" +name = "sha3_ce" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +checksum = "34c9a08202c50378d8a07a5f458193a5f542d2828ac6640263dbc0c2533ea25e" dependencies = [ "digest 0.10.7", "keccak", @@ -5937,7 +5986,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "num-bigint 0.4.4", + "num-bigint 0.4.6", "num-traits", "thiserror", "time", @@ -5968,12 +6017,6 @@ dependencies = [ "walkdir", ] -[[package]] -name = "sketches-ddsketch" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" - [[package]] name = "slab" version = "0.4.9" @@ -5998,17 +6041,18 @@ version = "0.1.0" dependencies = [ "anyhow", "futures 0.3.28", - "prometheus_exporter", "rand 0.8.5", + "structopt", "tokio", "tracing", "vise", - "vlog", "zksync_config", + "zksync_core_leftovers", "zksync_dal", "zksync_env_config", "zksync_object_store", "zksync_types", + "zksync_vlog", ] [[package]] @@ -6018,7 +6062,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ "aes-gcm", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2 0.10.6", "chacha20poly1305", "curve25519-dalek", "rand_core 0.6.4", @@ -6039,26 +6083,20 @@ dependencies = [ [[package]] name = "soketto" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" +checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" dependencies = [ - "base64 0.13.1", + "base64 0.22.1", "bytes", "futures 0.3.28", - "http", + "http 1.1.0", "httparse", "log", "rand 0.8.5", - "sha-1", + "sha1", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" @@ -6131,10 +6169,10 @@ dependencies = [ "bytes", "chrono", "crc", - "crossbeam-queue 0.3.8", + "crossbeam-queue 0.3.11", "dotenvy", "either", - "event-listener 2.5.3", + "event-listener", "futures-channel", "futures-core", "futures-intrusive", @@ -6212,7 +6250,7 @@ dependencies = [ "atoi", "base64 0.21.5", "bigdecimal", - "bitflags 2.4.1", + "bitflags 2.6.0", "byteorder", "bytes", "chrono", @@ -6257,7 +6295,7 @@ dependencies = [ "atoi", "base64 0.21.5", "bigdecimal", - "bitflags 2.4.1", + "bitflags 2.6.0", "byteorder", "chrono", "crc", @@ -6276,7 +6314,7 @@ dependencies = [ "log", "md-5", "memchr", - "num-bigint 0.4.4", + "num-bigint 0.4.6", "once_cell", "rand 0.8.5", "rust_decimal", @@ -6454,6 +6492,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "system-configuration" version = "0.5.1" @@ -6479,12 +6523,12 @@ dependencies = [ name = "system-constants-generator" version = "0.1.0" dependencies = [ - "codegen 0.2.0", - "multivm", + "codegen", "once_cell", "serde", "serde_json", "zksync_contracts", + "zksync_multivm", "zksync_state", "zksync_types", "zksync_utils", @@ -6516,12 +6560,21 @@ dependencies = [ ] [[package]] -name = "test-casing" -version = "0.1.2" +name = "termcolor" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2378d657757969a2cec9ec4eb616be8f01be98c21c6467991f91cb182e4653b" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ - "test-casing-macro", + "winapi-util", +] + +[[package]] +name = "test-casing" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2378d657757969a2cec9ec4eb616be8f01be98c21c6467991f91cb182e4653b" +dependencies = [ + "test-casing-macro", ] [[package]] @@ -6702,6 +6755,19 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" +[[package]] +name = "tls-listener" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a296135fdab7b3a1f708c338c50bab570bcd77d44080cde9341df45c0c6d73" +dependencies = [ + "futures-util", + "pin-project-lite", + "thiserror", + "tokio", + "tokio-rustls", +] + [[package]] name = "tokio" version = "1.34.0" @@ -6754,21 +6820,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.11", - "tokio", -] - -[[package]] -name = "tokio-rustls" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.22.4", + "rustls", "rustls-pki-types", "tokio", ] @@ -6782,6 +6838,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util", ] [[package]] @@ -6801,9 +6858,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" [[package]] name = "toml_edit" @@ -6827,6 +6884,17 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +dependencies = [ + "indexmap 2.1.0", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.9.2" @@ -6834,15 +6902,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" dependencies = [ "async-trait", - "axum", + 
"axum 0.6.20", "base64 0.21.5", "bytes", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", + "h2 0.3.26", + "http 0.2.9", + "http-body 0.4.6", + "hyper 0.14.29", "hyper-timeout", "percent-encoding", "pin-project", @@ -6863,7 +6931,6 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "hdrhistogram", "indexmap 1.9.3", "pin-project", "pin-project-lite", @@ -6878,32 +6945,19 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.4" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "async-compression", - "base64 0.21.5", - "bitflags 2.4.1", + "bitflags 2.6.0", "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "httpdate", - "iri-string", - "mime", - "mime_guess", - "percent-encoding", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", "pin-project-lite", "tokio", - "tokio-util", - "tower", "tower-layer", "tower-service", - "tracing", - "uuid", ] [[package]] @@ -7022,6 +7076,21 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +[[package]] +name = "trybuild" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8419ecd263363827c5730386f418715766f584e2f874d32c23c5b00bd9727e7e" +dependencies = [ + "basic-toml", + "glob", + "once_cell", + "serde", + "serde_derive", + "serde_json", + "termcolor", +] + [[package]] name = "typenum" version = "1.17.0" @@ -7141,12 +7210,6 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - [[package]] name = "untrusted" version = "0.9.0" @@ -7196,7 +7259,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" dependencies = [ - "getrandom", "serde", ] @@ -7240,7 +7302,8 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vise" version = "0.1.0" -source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229baafe01d5177b63c6ee1def80d8e39a2365e64caf69ddb05a57594b15647c" dependencies = [ "compile-fmt", "elsa", @@ -7253,10 +7316,10 @@ dependencies = [ [[package]] name = "vise-exporter" version = "0.1.0" -source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23981b18d697026f5430249ab01ba739ef2edc463e400042394331cb2bb63494" dependencies = [ - "hyper", - "metrics-exporter-prometheus", + "hyper 0.14.29", "once_cell", "tokio", "tracing", @@ -7266,53 +7329,24 @@ dependencies = [ [[package]] name = "vise-macros" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb19c33cd5f04dcf4e767635e058a998edbc2b7fca32ade0a4a1cea0f8e9b34" dependencies = [ "proc-macro2 1.0.69", "quote 1.0.33", "syn 2.0.38", ] -[[package]] -name = "vlog" -version = "0.1.0" -dependencies = [ - "chrono", - "opentelemetry", - "opentelemetry-otlp", - "opentelemetry-semantic-conventions", - "sentry", - "serde", - "serde_json", - "tracing", - "tracing-opentelemetry", - "tracing-subscriber", -] - [[package]] name = "vm-benchmark" version = "0.1.0" dependencies = [ "criterion", "iai", - "metrics-exporter-prometheus", "tokio", "vise", - "vm-benchmark-harness", -] - -[[package]] -name = "vm-benchmark-harness" -version = "0.1.0" -dependencies = [ - "multivm", - "once_cell", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zksync_contracts", - "zksync_state", - "zksync_system_constants", - "zksync_types", - "zksync_utils", + "zksync_vlog", + "zksync_vm_benchmark_harness", ] [[package]] @@ -7326,21 +7360,6 @@ dependencies = [ "zkevm_opcode_defs 1.5.0", ] -[[package]] -name = "vm_utils" -version = "0.1.0" -dependencies = [ - "anyhow", - "multivm", - "tokio", - "tracing", - "zksync_contracts", - "zksync_dal", - "zksync_state", - "zksync_types", - "zksync_utils", -] - [[package]] name = "walkdir" version = "2.4.0" @@ -7446,9 +7465,9 @@ checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-streams" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4609d447824375f43e1ffbc051b50ad8f4b3ae8219680c94452ea05eb240ac7" +checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" dependencies = [ "futures-util", "js-sys", @@ -7689,6 +7708,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "winreg" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +dependencies = [ + "cfg-if 1.0.0", + "windows-sys 0.48.0", +] + [[package]] name = "wyz" version = "0.5.1" @@ -7755,25 +7784,27 @@ dependencies = [ [[package]] name = "zk_evm" -version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.1-rc2#0a7c775932db4839ff6b7fb0db9bdb3583ab54c0" +version = "0.131.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2b83ee7887fb29fda57c6b26a0f64c9b211459d718f8a26310f962e69f0b764" dependencies = [ - "blake2 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e)", + "blake2_ce", "k256 0.11.6", "lazy_static", "num", "serde", "serde_json", - "sha2 0.10.6", - "sha3 0.10.6", + "sha2_ce", + "sha3_ce", "static_assertions", - "zkevm_opcode_defs 1.3.1", + "zkevm_opcode_defs 0.131.0", ] [[package]] name = "zk_evm" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2#fbee20f5bac7d6ca3e22ae69b2077c510a07de4e" +version = "0.133.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9af08e9284686a1b0c89ec4931eb915ac0729367f1247abd06164874fe738106" dependencies = [ "anyhow", "lazy_static", @@ -7781,14 +7812,15 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.1.0", - "zkevm_opcode_defs 1.3.2", + "zk_evm_abstractions 
0.140.0", + "zkevm_opcode_defs 0.132.0", ] [[package]] name = "zk_evm" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3#fbee20f5bac7d6ca3e22ae69b2077c510a07de4e" +version = "0.140.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349bb8320d12578537658792df708f43c52e6330f0df071f812cb93b04ade962" dependencies = [ "anyhow", "lazy_static", @@ -7796,14 +7828,15 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.1.0", - "zkevm_opcode_defs 1.3.2", + "zk_evm_abstractions 0.140.0", + "zkevm_opcode_defs 0.132.0", ] [[package]] name = "zk_evm" -version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.4.0#dd76fc5badf2c05278a21b38015a7798fe2fe358" +version = "0.141.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8886ba5989b952b7b76096469eeb6fdfaf3369770e9e22a6f67dc4b7d65f9243" dependencies = [ "anyhow", "lazy_static", @@ -7811,14 +7844,15 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.1.0", - "zkevm_opcode_defs 1.3.2", + "zk_evm_abstractions 0.141.0", + "zkevm_opcode_defs 0.141.0", ] [[package]] name = "zk_evm" -version = "1.4.1" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.4.1#6250dbf64b2d14ced87a127735da559f27a432d5" +version = "0.150.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5bf91304aa14827758afa3def8cf622f9a7f9fb65fe5d5099018dbacf0c5984" dependencies = [ "anyhow", "lazy_static", @@ -7826,46 +7860,46 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 1.4.1", - "zkevm_opcode_defs 1.4.1", + "zk_evm_abstractions 0.150.0", ] [[package]] -name = "zk_evm" -version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.5.0#9bbf7ffd2c38ee8b9667e96eaf0c111037fe976f" +name = "zk_evm_abstractions" +version = "0.140.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be696258861eba4e6625a5665084b2266720bb67f4ba69819469700ac5c6a401" dependencies = [ "anyhow", - "lazy_static", - "num", + "num_enum 0.6.1", "serde", - "serde_json", "static_assertions", - "zk_evm_abstractions 1.5.0", + "zkevm_opcode_defs 0.132.0", ] [[package]] name = "zk_evm_abstractions" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#32dd320953841aa78579d9da08abbc70bcaed175" +version = "0.141.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "637a3cb6cb475bb238bee3e450763205d36fe6c92dc1b23300655927915baf03" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 1.3.2", + "zkevm_opcode_defs 0.141.0", ] [[package]] name = "zk_evm_abstractions" -version = "1.4.1" -source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git?branch=v1.4.1#0aac08c3b097ee8147e748475117ac46bddcdcef" +version = "0.150.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc313cea4ac9ef6b855264b1425cbe9de30dd8f009559dabcb6b2896122da5db" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 1.4.1", + "zkevm_opcode_defs 0.150.0", ] [[package]] @@ -7882,8 +7916,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.4.0#fb3e2574b5c890342518fc930c145443f039a105" +version = "0.140.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db7061a85757529d06a9cb1c4697902bff16dfb303484499eeb5c7f20e1ac0d" dependencies = [ "arrayvec 0.7.4", "bincode", @@ -7898,13 +7933,14 @@ dependencies = [ "serde", "serde_json", "smallvec", - "zkevm_opcode_defs 1.3.2", + "zkevm_opcode_defs 0.132.0", ] [[package]] name = "zkevm_circuits" -version = "1.4.1" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.4.1#3a973afb3cf2b50b7138c1af61cc6ac3d7d0189f" +version = "0.141.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25e0f6e554b88310ad3b086e5334fbebe27154674a91c91643241b64c3d05b3a" dependencies = [ "arrayvec 0.7.4", "bincode", @@ -7919,13 +7955,14 @@ dependencies = [ "serde", "serde_json", "smallvec", - "zkevm_opcode_defs 1.4.1", + "zkevm_opcode_defs 0.141.0", ] [[package]] name = "zkevm_circuits" -version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.5.0#b7a86c739e8a8f88e788e90893c6e7496f6d7dfc" +version = "0.150.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4691ca0faeb666120ad48fb1a45750c5bacc90118a851f4450f3e1e903f9b2e3" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7938,13 +7975,14 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 1.5.0", + "zkevm_opcode_defs 0.150.0", ] [[package]] name = "zkevm_opcode_defs" -version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.1#00d4ad2292bd55374a0fa10fe11686d7a109d8a0" +version = "0.131.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49e0154bd4ae8202c96c52b29dd44f944bfd08c1c233fef843744463964de957" dependencies = [ "bitflags 1.3.2", "ethereum-types", @@ -7954,28 +7992,47 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#dffacadeccdfdbff4bc124d44c595c4a6eae5013" +version = "0.132.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" dependencies = [ - "bitflags 2.4.1", - "blake2 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e)", + "bitflags 2.6.0", + "blake2 0.10.6", "ethereum-types", "k256 0.11.6", "lazy_static", - "sha2 0.10.6", - "sha3 0.10.6", + "sha2_ce", + "sha3_ce", ] [[package]] name = "zkevm_opcode_defs" -version = "1.4.1" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.4.1#ba8228ff0582d21f64d6a319d50d0aec48e9e7b6" +version = "0.141.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6be7bd5f0e0b61211f544147289640b4712715589d7f2fe5229d92a7a3ac64c0" +dependencies = [ + "bitflags 2.6.0", + "blake2 0.10.6", + "ethereum-types", + "k256 0.13.3", + "lazy_static", + "sha2 0.10.8", + "sha3 0.10.8", +] + +[[package]] +name = "zkevm_opcode_defs" +version = "0.150.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3328c012d444bdbfadb754a72c01a56879eb66584efc71eac457e89e7843608" dependencies = [ - "bitflags 2.4.1", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 2.6.0", + "blake2 0.10.6", "ethereum-types", - "k256 0.13.2", + "k256 0.13.3", "lazy_static", + "p256", + "serde", "sha2 0.10.8", "sha3 0.10.8", ] @@ -7985,10 +8042,10 @@ name = "zkevm_opcode_defs" version = "1.5.0" source = 
"git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.5.0#28d2edabf902ea9b08f6a26a4506831fd89346b9" dependencies = [ - "bitflags 2.4.1", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 2.6.0", + "blake2 0.10.6", "ethereum-types", - "k256 0.13.2", + "k256 0.13.3", "lazy_static", "p256", "serde", @@ -7996,6 +8053,22 @@ dependencies = [ "sha3 0.10.8", ] +[[package]] +name = "zksync_base_token_adjuster" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "chrono", + "rand 0.8.5", + "tokio", + "tracing", + "zksync_config", + "zksync_dal", + "zksync_external_price_api", + "zksync_types", +] + [[package]] name = "zksync_basic_types" version = "0.1.0" @@ -8059,26 +8132,26 @@ name = "zksync_commitment_generator" version = "0.1.0" dependencies = [ "anyhow", - "circuit_sequencer_api 0.1.40", - "circuit_sequencer_api 0.1.41", - "circuit_sequencer_api 0.1.50", + "circuit_sequencer_api 0.140.0", + "circuit_sequencer_api 0.141.0", + "circuit_sequencer_api 0.150.2-rc.1", "futures 0.3.28", "itertools 0.10.5", - "multivm", "num_cpus", "rand 0.8.5", "serde_json", "tokio", "tracing", "vise", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zk_evm 1.4.1", - "zk_evm 1.5.0", + "zk_evm 0.133.0", + "zk_evm 0.141.0", + "zk_evm 0.150.0", "zksync_contracts", "zksync_dal", "zksync_eth_client", "zksync_health_check", "zksync_l1_contract_interface", + "zksync_multivm", "zksync_node_genesis", "zksync_node_test_utils", "zksync_types", @@ -8088,8 +8161,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50302b77192891256d180ff2551dc0c3bc4144958b49e9a16c50a0dc218958ba" dependencies = [ "anyhow", "once_cell", @@ -8112,15 +8186,18 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "url", "zksync_basic_types", + "zksync_concurrency", "zksync_consensus_utils", "zksync_crypto_primitives", ] [[package]] name = "zksync_consensus_bft" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2325c7486a8280db1c26c10020350bead6eecb3de03f8bbfd878060f000cdce7" dependencies = [ "anyhow", "async-trait", @@ -8140,17 +8217,20 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5cb8ed0d59593f6147085b77142628e459ba673aa4d48fce064d5b96e31eb36" dependencies = [ "anyhow", "blst", "ed25519-dalek", + "elliptic-curve 0.13.8", "ff_ce", "hex", - "num-bigint 0.4.4", + "k256 0.13.3", + "num-bigint 0.4.6", "num-traits", - "pairing_ce 0.28.5 (git+https://github.com/matter-labs/pairing.git?rev=d24f2c5871089c4cd4f54c0ca266bb9fef6115eb)", + "pairing_ce", "rand 0.4.6", "rand 0.8.5", "sha3 0.10.8", @@ -8161,8 +8241,9 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.1.0" -source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "247b70ec255781b3b740acb744236e771a192922ffbaa52c462b84c4ea67609f" dependencies = [ "anyhow", "rand 0.8.5", @@ -8180,11 +8261,18 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f10626b79885a9b096cd19ee83d85ef9b0554f061a9db6946f2b7c9d1b2f49ea" dependencies = [ "anyhow", "async-trait", + "base64 0.22.1", + "build_html", + "bytesize", + "http-body-util", + "hyper 1.3.1", + "hyper-util", "im", "once_cell", "pin-project", @@ -8192,6 +8280,9 @@ dependencies = [ "rand 0.8.5", "snow", "thiserror", + "tls-listener", + "tokio", + "tokio-rustls", "tracing", "vise", "zksync_concurrency", @@ -8205,13 +8296,14 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ffe3e47d99eb943eb94f2f5c9d929b1192bf3e8d1434de0fa6f0090f9c1197e" dependencies = [ "anyhow", "bit-vec", "hex", - "num-bigint 0.4.4", + "num-bigint 0.4.6", "prost 0.12.1", "rand 0.8.5", "serde", @@ -8226,8 +8318,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ae9a0ec64ce9c0af346e50cc87dc257c30259101ce9675b408cb883e096087" dependencies = [ "anyhow", "async-trait", @@ -8237,6 +8330,7 @@ dependencies = [ "tracing", "vise", "zksync_concurrency", + "zksync_consensus_crypto", "zksync_consensus_roles", "zksync_protobuf", "zksync_protobuf_build", @@ -8244,9 +8338,11 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24dc6135abeefa80f617eb2903fe43d137d362bf673f0651b4894b17069d1fb1" dependencies = [ + "anyhow", "rand 0.8.5", "thiserror", "zksync_concurrency", @@ -8283,7 +8379,7 @@ name = "zksync_contract_verification_server" version = "0.1.0" dependencies = [ "anyhow", - "axum", + "axum 0.7.5", "serde", "serde_json", "tokio", @@ -8302,17 +8398,17 @@ dependencies = [ "anyhow", "ctrlc", "futures 0.3.28", - "prometheus_exporter", "structopt", "tokio", "tracing", - "vlog", "zksync_config", "zksync_contract_verifier_lib", + "zksync_core_leftovers", "zksync_dal", "zksync_env_config", "zksync_queued_job_processor", "zksync_utils", + "zksync_vlog", ] [[package]] @@ -8360,121 +8456,61 @@ name = "zksync_core_leftovers" version = "0.1.0" dependencies = [ "anyhow", - "assert_matches", - "async-trait", - "axum", - "backon", - "chrono", "ctrlc", - "dashmap", - "futures 0.3.28", - "governor", + 
"serde_yaml", + "tokio", + "zksync_config", + "zksync_dal", + "zksync_env_config", + "zksync_node_genesis", + "zksync_protobuf", + "zksync_protobuf_config", +] + +[[package]] +name = "zksync_crypto_primitives" +version = "0.1.0" +dependencies = [ + "anyhow", + "blake2 0.10.6", "hex", - "itertools 0.10.5", - "jsonrpsee", - "lru", - "multivm", - "once_cell", - "pin-project-lite", - "prometheus_exporter", - "prost 0.12.1", - "prover_dal", "rand 0.8.5", - "reqwest", - "secrecy", + "secp256k1", "serde", "serde_json", - "serde_yaml", - "tempfile", - "test-casing", - "test-log", + "sha2 0.10.8", "thiserror", - "thread_local", - "tokio", - "tower", - "tower-http", + "zksync_basic_types", + "zksync_utils", +] + +[[package]] +name = "zksync_da_client" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "serde", "tracing", - "vise", - "vlog", - "vm_utils", - "zksync_circuit_breaker", - "zksync_commitment_generator", - "zksync_concurrency", "zksync_config", - "zksync_consensus_bft", - "zksync_consensus_crypto", - "zksync_consensus_executor", - "zksync_consensus_network", - "zksync_consensus_roles", - "zksync_consensus_storage", - "zksync_consensus_utils", - "zksync_contract_verification_server", - "zksync_contracts", - "zksync_dal", - "zksync_db_connection", - "zksync_eth_client", - "zksync_eth_sender", - "zksync_eth_signer", - "zksync_eth_watch", - "zksync_health_check", - "zksync_house_keeper", - "zksync_l1_contract_interface", - "zksync_mempool", - "zksync_merkle_tree", - "zksync_metadata_calculator", - "zksync_mini_merkle_tree", - "zksync_node_api_server", - "zksync_node_consensus", - "zksync_node_fee_model", - "zksync_node_genesis", - "zksync_node_sync", - "zksync_node_test_utils", - "zksync_object_store", - "zksync_proof_data_handler", - "zksync_protobuf", - "zksync_protobuf_build", - "zksync_protobuf_config", - "zksync_prover_interface", - "zksync_queued_job_processor", - "zksync_shared_metrics", - "zksync_state", - "zksync_state_keeper", - "zksync_storage", - "zksync_system_constants", - "zksync_tee_verifier", - "zksync_tee_verifier_input_producer", - "zksync_test_account", "zksync_types", - "zksync_utils", - "zksync_web3_decl", ] [[package]] -name = "zksync_crypto" -version = "0.1.0" -dependencies = [ - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", - "hex", - "once_cell", - "serde", - "serde_json", - "sha2 0.10.8", - "thiserror", - "zksync_basic_types", -] - -[[package]] -name = "zksync_crypto_primitives" +name = "zksync_da_dispatcher" version = "0.1.0" dependencies = [ "anyhow", - "hex", + "chrono", + "futures 0.3.28", "rand 0.8.5", - "secp256k1", - "serde", - "serde_json", - "thiserror", - "zksync_basic_types", + "tokio", + "tracing", + "vise", + "zksync_config", + "zksync_da_client", + "zksync_dal", + "zksync_types", "zksync_utils", ] @@ -8521,6 +8557,7 @@ dependencies = [ "serde", "serde_json", "sqlx", + "test-casing", "thiserror", "tokio", "tracing", @@ -8529,6 +8566,23 @@ dependencies = [ "zksync_health_check", ] +[[package]] +name = "zksync_default_da_clients" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "flate2", + "serde", + "tracing", + "zksync_config", + "zksync_da_client", + "zksync_env_config", + "zksync_node_framework", + "zksync_object_store", + "zksync_types", +] + [[package]] name = "zksync_env_config" version = "0.1.0" @@ -8623,14 +8677,14 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.7.0" +version = "24.11.0" dependencies = [ "anyhow", "assert_matches", + "async-trait", "clap 
4.4.6", "envy", "futures 0.3.28", - "prometheus_exporter", "rustc_version", "semver", "serde", @@ -8641,11 +8695,11 @@ dependencies = [ "tracing", "url", "vise", - "vlog", "zksync_block_reverter", "zksync_commitment_generator", "zksync_concurrency", "zksync_config", + "zksync_consensus_crypto", "zksync_consensus_roles", "zksync_consistency_checker", "zksync_contracts", @@ -8674,9 +8728,28 @@ dependencies = [ "zksync_storage", "zksync_types", "zksync_utils", + "zksync_vlog", "zksync_web3_decl", ] +[[package]] +name = "zksync_external_price_api" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "bigdecimal", + "chrono", + "fraction", + "rand 0.8.5", + "reqwest 0.12.5", + "serde", + "tokio", + "url", + "zksync_config", + "zksync_types", +] + [[package]] name = "zksync_health_check" version = "0.1.0" @@ -8698,30 +8771,47 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "prover_dal", "tokio", "tracing", "vise", "zksync_config", "zksync_dal", + "zksync_prover_dal", "zksync_shared_metrics", "zksync_types", ] +[[package]] +name = "zksync_kzg" +version = "0.150.2-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4672556b6bc06da9dcd38a607e139b8eb3083edfaabcd12981e8a62051ee1f81" +dependencies = [ + "boojum", + "derivative", + "hex", + "once_cell", + "rayon", + "serde", + "serde_json", + "serde_with", + "zkevm_circuits 0.150.0", +] + [[package]] name = "zksync_l1_contract_interface" version = "0.1.0" dependencies = [ - "codegen 0.1.0", "hex", - "kzg", "once_cell", "serde", "serde_json", "serde_with", "sha2 0.10.8", "sha3 0.10.8", + "zksync_kzg", "zksync_prover_interface", + "zksync_solidity_vk_codegen", "zksync_types", ] @@ -8755,7 +8845,7 @@ dependencies = [ "tracing", "tracing-subscriber", "vise", - "zksync_crypto", + "zksync_crypto_primitives", "zksync_prover_interface", "zksync_storage", "zksync_system_constants", @@ -8770,11 +8860,11 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "axum", + "axum 0.7.5", "futures 0.3.28", "itertools 0.10.5", "once_cell", - "reqwest", + "reqwest 0.12.5", "serde", "serde_json", "tempfile", @@ -8784,7 +8874,7 @@ dependencies = [ "tracing", "vise", "zksync_config", - "zksync_crypto", + "zksync_crypto_primitives", "zksync_dal", "zksync_health_check", "zksync_merkle_tree", @@ -8805,7 +8895,42 @@ dependencies = [ "criterion", "once_cell", "zksync_basic_types", - "zksync_crypto", + "zksync_crypto_primitives", +] + +[[package]] +name = "zksync_multivm" +version = "0.1.0" +dependencies = [ + "anyhow", + "circuit_sequencer_api 0.133.0", + "circuit_sequencer_api 0.140.0", + "circuit_sequencer_api 0.141.0", + "circuit_sequencer_api 0.142.0", + "circuit_sequencer_api 0.150.2-rc.1", + "ethabi", + "hex", + "itertools 0.10.5", + "once_cell", + "pretty_assertions", + "serde", + "thiserror", + "tokio", + "tracing", + "vise", + "vm2", + "zk_evm 0.131.0-rc.2", + "zk_evm 0.133.0", + "zk_evm 0.140.0", + "zk_evm 0.141.0", + "zk_evm 0.150.0", + "zksync_contracts", + "zksync_eth_signer", + "zksync_state", + "zksync_system_constants", + "zksync_test_account", + "zksync_types", + "zksync_utils", ] [[package]] @@ -8815,20 +8940,20 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "axum", + "axum 0.7.5", "chrono", "futures 0.3.28", "governor", "hex", - "http", + "http 1.1.0", "itertools 0.10.5", "lru", - "multivm", "once_cell", "pin-project-lite", "rand 0.8.5", "serde", "serde_json", + "strum", "test-casing", "thiserror", "thread_local", @@ -8843,6 +8968,7 @@ dependencies = [ 
"zksync_health_check", "zksync_metadata_calculator", "zksync_mini_merkle_tree", + "zksync_multivm", "zksync_node_fee_model", "zksync_node_genesis", "zksync_node_sync", @@ -8867,6 +8993,7 @@ dependencies = [ "secrecy", "tempfile", "test-casing", + "thiserror", "tokio", "tracing", "zksync_concurrency", @@ -8926,10 +9053,12 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "bigdecimal", "test-casing", "tokio", "tracing", "vise", + "zksync_base_token_adjuster", "zksync_config", "zksync_dal", "zksync_eth_client", @@ -8948,12 +9077,12 @@ dependencies = [ "async-trait", "ctrlc", "futures 0.3.28", - "prometheus_exporter", - "prover_dal", + "pin-project-lite", "thiserror", "tokio", "tracing", - "vlog", + "trybuild", + "zksync_base_token_adjuster", "zksync_block_reverter", "zksync_circuit_breaker", "zksync_commitment_generator", @@ -8962,12 +9091,15 @@ dependencies = [ "zksync_consistency_checker", "zksync_contract_verification_server", "zksync_contracts", + "zksync_da_client", + "zksync_da_dispatcher", "zksync_dal", "zksync_db_connection", "zksync_env_config", "zksync_eth_client", "zksync_eth_sender", "zksync_eth_watch", + "zksync_external_price_api", "zksync_health_check", "zksync_house_keeper", "zksync_metadata_calculator", @@ -8975,10 +9107,13 @@ dependencies = [ "zksync_node_consensus", "zksync_node_db_pruner", "zksync_node_fee_model", + "zksync_node_framework_derive", + "zksync_node_storage_init", "zksync_node_sync", "zksync_object_store", "zksync_proof_data_handler", "zksync_protobuf_config", + "zksync_prover_dal", "zksync_queued_job_processor", "zksync_reorg_detector", "zksync_state", @@ -8987,17 +9122,26 @@ dependencies = [ "zksync_tee_verifier_input_producer", "zksync_types", "zksync_utils", + "zksync_vlog", "zksync_vm_runner", "zksync_web3_decl", ] +[[package]] +name = "zksync_node_framework_derive" +version = "0.1.0" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.38", +] + [[package]] name = "zksync_node_genesis" version = "0.1.0" dependencies = [ "anyhow", "itertools 0.10.5", - "multivm", "thiserror", "tokio", "tracing", @@ -9007,11 +9151,34 @@ dependencies = [ "zksync_dal", "zksync_eth_client", "zksync_merkle_tree", + "zksync_multivm", "zksync_system_constants", "zksync_types", "zksync_utils", ] +[[package]] +name = "zksync_node_storage_init" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "tokio", + "tracing", + "zksync_block_reverter", + "zksync_config", + "zksync_dal", + "zksync_health_check", + "zksync_node_genesis", + "zksync_node_sync", + "zksync_object_store", + "zksync_reorg_detector", + "zksync_shared_metrics", + "zksync_snapshots_applier", + "zksync_types", + "zksync_web3_decl", +] + [[package]] name = "zksync_node_sync" version = "0.1.0" @@ -9029,7 +9196,6 @@ dependencies = [ "tokio", "tracing", "vise", - "vm_utils", "zksync_concurrency", "zksync_config", "zksync_contracts", @@ -9043,6 +9209,7 @@ dependencies = [ "zksync_system_constants", "zksync_types", "zksync_utils", + "zksync_vm_utils", "zksync_web3_decl", ] @@ -9050,10 +9217,10 @@ dependencies = [ name = "zksync_node_test_utils" version = "0.1.0" dependencies = [ - "multivm", "zksync_contracts", "zksync_dal", "zksync_merkle_tree", + "zksync_multivm", "zksync_node_genesis", "zksync_system_constants", "zksync_types", @@ -9071,10 +9238,10 @@ dependencies = [ "flate2", "google-cloud-auth", "google-cloud-storage", - "http", + "http 1.1.0", "prost 0.12.1", "rand 0.8.5", - "reqwest", + "reqwest 0.12.5", "serde_json", "tempfile", "tokio", @@ -9090,18 +9257,19 @@ name = 
"zksync_proof_data_handler" version = "0.1.0" dependencies = [ "anyhow", - "axum", + "axum 0.7.5", "chrono", - "hyper", - "multivm", + "hyper 1.3.1", "serde_json", "tokio", "tower", "tracing", + "vise", "zksync_basic_types", "zksync_config", "zksync_contracts", "zksync_dal", + "zksync_multivm", "zksync_object_store", "zksync_prover_interface", "zksync_tee_verifier", @@ -9110,8 +9278,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e7c7820f290db565a1b4ff73aa1175cd7d31498fca8d859eb5aceebd33468c" dependencies = [ "anyhow", "bit-vec", @@ -9130,8 +9299,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6cafeec1150ae91f1a37c8f0dce6b71b92b93e0c4153d32b4c37e2fd71bce2f" dependencies = [ "anyhow", "heck 0.5.0", @@ -9162,19 +9332,31 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_prover_dal" +version = "0.1.0" +dependencies = [ + "sqlx", + "strum", + "zksync_basic_types", + "zksync_db_connection", +] + [[package]] name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.1.50", + "circuit_sequencer_api 0.150.2-rc.1", "serde", "serde_json", "serde_with", "strum", "tokio", + "zksync_multivm", "zksync_object_store", + "zksync_state", "zksync_types", ] @@ -9218,18 +9400,17 @@ dependencies = [ "anyhow", "clap 4.4.6", "futures 0.3.28", - "prometheus_exporter", "serde_json", "tikv-jemallocator", "tokio", "tracing", - "vlog", "zksync_concurrency", "zksync_config", "zksync_consensus_crypto", "zksync_consensus_executor", "zksync_consensus_roles", "zksync_core_leftovers", + "zksync_default_da_clients", "zksync_env_config", "zksync_eth_client", "zksync_metadata_calculator", @@ -9240,6 +9421,7 @@ dependencies = [ "zksync_storage", "zksync_types", "zksync_utils", + "zksync_vlog", ] [[package]] @@ -9276,6 +9458,23 @@ dependencies = [ "zksync_web3_decl", ] +[[package]] +name = "zksync_solidity_vk_codegen" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bac71750012656b207e8cdb67415823318909077d8c8e235111f0d2feeeeeda" +dependencies = [ + "ethereum-types", + "franklin-crypto", + "handlebars", + "hex", + "paste", + "rescue_poseidon", + "serde", + "serde_derive", + "serde_json", +] + [[package]] name = "zksync_state" version = "0.1.0" @@ -9283,6 +9482,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", + "backon", "chrono", "itertools 0.10.5", "mini-moka", @@ -9310,7 +9510,6 @@ dependencies = [ "futures 0.3.28", "hex", "itertools 0.10.5", - "multivm", "once_cell", "tempfile", "test-casing", @@ -9318,12 +9517,13 @@ dependencies = [ "tokio", "tracing", "vise", - "vm_utils", + "zksync_base_token_adjuster", "zksync_config", "zksync_contracts", "zksync_dal", "zksync_eth_client", "zksync_mempool", + "zksync_multivm", "zksync_node_fee_model", "zksync_node_genesis", "zksync_node_test_utils", @@ -9335,6 +9535,7 @@ dependencies = [ "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm_utils", ] [[package]] @@ 
-9359,28 +9560,52 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_tee_prover" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "reqwest 0.12.5", + "secp256k1", + "serde", + "thiserror", + "tokio", + "tracing", + "url", + "vise", + "zksync_basic_types", + "zksync_config", + "zksync_env_config", + "zksync_node_framework", + "zksync_prover_interface", + "zksync_tee_verifier", + "zksync_types", + "zksync_vlog", +] + [[package]] name = "zksync_tee_verifier" version = "0.1.0" dependencies = [ "anyhow", - "multivm", "serde", "tracing", - "vm_utils", "zksync_basic_types", "zksync_config", "zksync_contracts", - "zksync_crypto", + "zksync_crypto_primitives", "zksync_dal", "zksync_db_connection", "zksync_merkle_tree", + "zksync_multivm", "zksync_object_store", "zksync_prover_interface", "zksync_queued_job_processor", "zksync_state", "zksync_types", "zksync_utils", + "zksync_vm_utils", ] [[package]] @@ -9392,7 +9617,6 @@ dependencies = [ "tokio", "tracing", "vise", - "vm_utils", "zksync_dal", "zksync_object_store", "zksync_prover_interface", @@ -9400,6 +9624,7 @@ dependencies = [ "zksync_tee_verifier", "zksync_types", "zksync_utils", + "zksync_vm_utils", ] [[package]] @@ -9421,8 +9646,9 @@ name = "zksync_types" version = "0.1.0" dependencies = [ "anyhow", + "bigdecimal", "bincode", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2 0.10.6", "chrono", "derive_more", "hex", @@ -9438,6 +9664,7 @@ dependencies = [ "strum", "thiserror", "tokio", + "tracing", "zksync_basic_types", "zksync_config", "zksync_contracts", @@ -9462,15 +9689,49 @@ dependencies = [ "num", "once_cell", "rand 0.8.5", - "reqwest", + "reqwest 0.12.5", "serde", "serde_json", "thiserror", "tokio", "tracing", - "vlog", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", + "zk_evm 0.133.0", "zksync_basic_types", + "zksync_vlog", +] + +[[package]] +name = "zksync_vlog" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "opentelemetry", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "sentry", + "serde", + "serde_json", + "tokio", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", + "vise", + "vise-exporter", +] + +[[package]] +name = "zksync_vm_benchmark_harness" +version = "0.1.0" +dependencies = [ + "once_cell", + "zk_evm 0.133.0", + "zksync_contracts", + "zksync_multivm", + "zksync_state", + "zksync_system_constants", + "zksync_types", + "zksync_utils", ] [[package]] @@ -9482,24 +9743,41 @@ dependencies = [ "backon", "dashmap", "futures 0.3.28", - "multivm", "once_cell", "rand 0.8.5", "tempfile", "tokio", "tracing", "vise", - "vm_utils", "zksync_contracts", "zksync_dal", + "zksync_multivm", "zksync_node_genesis", "zksync_node_test_utils", + "zksync_object_store", + "zksync_prover_interface", "zksync_state", "zksync_state_keeper", "zksync_storage", "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm_utils", +] + +[[package]] +name = "zksync_vm_utils" +version = "0.1.0" +dependencies = [ + "anyhow", + "tokio", + "tracing", + "zksync_contracts", + "zksync_dal", + "zksync_multivm", + "zksync_state", + "zksync_types", + "zksync_utils", ] [[package]] @@ -9514,6 +9792,7 @@ dependencies = [ "pin-project-lite", "rand 0.8.5", "rlp", + "rustls", "serde", "serde_json", "test-casing", @@ -9525,24 +9804,6 @@ dependencies = [ "zksync_types", ] -[[package]] -name = "zstd" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "7.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" -dependencies = [ - "zstd-sys", -] - [[package]] name = "zstd-sys" version = "2.0.9+zstd.1.5.5" diff --git a/Cargo.toml b/Cargo.toml index 5d9f6adf37ad..7b6ac30be8f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ members = [ "core/bin/verified_sources_fetcher", "core/bin/zksync_server", "core/bin/genesis_generator", + "core/bin/zksync_tee_prover", # Node services "core/node/node_framework", "core/node/proof_data_handler", @@ -20,6 +21,7 @@ members = [ "core/node/shared_metrics", "core/node/db_pruner", "core/node/fee_model", + "core/node/da_dispatcher", "core/node/eth_sender", "core/node/vm_runner", "core/node/test_utils", @@ -28,10 +30,12 @@ members = [ "core/node/consistency_checker", "core/node/metadata_calculator", "core/node/node_sync", + "core/node/node_storage_init", "core/node/consensus", "core/node/contract_verification_server", "core/node/api_server", "core/node/tee_verifier_input_producer", + "core/node/base_token_adjuster", # Libraries "core/lib/db_connection", "core/lib/zksync_core_leftovers", @@ -40,18 +44,19 @@ members = [ "core/lib/constants", "core/lib/contract_verifier", "core/lib/contracts", - "core/lib/crypto", "core/lib/circuit_breaker", "core/lib/dal", "core/lib/env_config", + "core/lib/da_client", + "core/lib/default_da_clients", "core/lib/eth_client", "core/lib/eth_signer", "core/lib/l1_contract_interface", "core/lib/mempool", "core/lib/merkle_tree", "core/lib/mini_merkle_tree", + "core/lib/node_framework_derive", "core/lib/object_store", - "core/lib/prometheus_exporter", "core/lib/prover_interface", "core/lib/queued_job_processor", "core/lib/state", @@ -66,11 +71,15 @@ members = [ "core/lib/web3_decl", "core/lib/snapshots_applier", "core/lib/crypto_primitives", + "core/lib/external_price_api", # Test infrastructure "core/tests/test_account", "core/tests/loadnext", "core/tests/vm-benchmark", "core/tests/vm-benchmark/harness", + + # Parts of prover workspace that are needed for Core workspace + "prover/crates/lib/prover_dal", ] resolver = "2" @@ -96,7 +105,7 @@ categories = ["cryptography"] anyhow = "1" assert_matches = "1.5" async-trait = "0.1" -axum = "0.6.19" +axum = "0.7.5" backon = "0.4.4" bigdecimal = "0.3.0" bincode = "1" @@ -112,21 +121,19 @@ envy = "0.4" ethabi = "18.0.0" flate2 = "1.0.28" futures = "0.3" -google-cloud-auth = "0.13.0" -google-cloud-storage = "0.15.0" +google-cloud-auth = "0.16.0" +google-cloud-storage = "0.20.0" governor = "0.4.2" hex = "0.4" -http = "0.2.9" -hyper = "0.14.29" +http = "1.1" +hyper = "1.3" iai = "0.1" insta = "1.29.0" itertools = "0.10" -jsonrpsee = { version = "0.21.0", default-features = false } +jsonrpsee = { version = "0.23", default-features = false } lazy_static = "1.4" leb128 = "0.2.5" lru = { version = "0.12.1", default-features = false } -metrics = "0.21" -metrics-exporter-prometheus = "0.12" mini-moka = "0.10.0" num = "0.4.0" num_cpus = "1.13" @@ -141,10 +148,11 @@ prost = "0.12.1" rand = "0.8" rayon = "1.3.1" regex = "1" -reqwest = "0.11" +reqwest = "0.12" rlp = "0.5" rocksdb = "0.21.0" rustc_version = "0.4.0" +rustls = "0.23" secp256k1 = { version = "0.27.0", features = ["recovery", "global-context"] } secrecy = "0.8.0" semver = "1" @@ -168,100 +176,114 @@ tikv-jemallocator = "0.5" tiny-keccak = "2" tokio = 
"1" tower = "0.4.13" -tower-http = "0.4.1" +tower-http = "0.5.2" tracing = "0.1" tracing-subscriber = "0.3" tracing-opentelemetry = "0.21.0" url = "2" web3 = "0.19.0" +fraction = "0.15.3" + +# Proc-macro +syn = "2.0" +quote = "1.0" +proc-macro2 = "1.0" +trybuild = "1.0" # "Internal" dependencies -circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" } -circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0" } -circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.1" } -circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.2" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.5.0" } -crypto_codegen = { package = "codegen", git = "https://github.com/matter-labs/solidity_plonk_verifier.git", branch = "dev" } -kzg = { package = "kzg", git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.5.0" } -vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "a5bb80c9ce7168663114ee30e794d6dc32159ee4" } -vise-exporter = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "a5bb80c9ce7168663114ee30e794d6dc32159ee4" } -zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } -zk_evm_1_3_1 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.1-rc2" } -zk_evm_1_3_3 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } -zk_evm_1_4_0 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.0" } -zk_evm_1_4_1 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.1" } -zk_evm_1_5_0 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.5.0" } -zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_consensus_bft = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_consensus_network = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_protobuf = { version = "0.1.0", git = 
"https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "3e6f101ee4124308c4c974caaa259d524549b0c6" } +vise = "0.1.0" +vise-exporter = "0.1.0" + +circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "=0.133.0" } +circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "=0.140.0" } +circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "=0.141.0" } +circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "=0.142.0" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.2-rc.1" } +crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.1.0" } +kzg = { package = "zksync_kzg", version = "=0.150.2-rc.1" } +zk_evm = { version = "=0.133.0" } +zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } +zk_evm_1_3_3 = { package = "zk_evm", version = "0.133.0" } +zk_evm_1_4_0 = { package = "zk_evm", version = "0.140.0" } +zk_evm_1_4_1 = { package = "zk_evm", version = "0.141.0" } +zk_evm_1_5_0 = { package = "zk_evm", version = "0.150.0" } + +# Consensus dependencies. +zksync_concurrency = "=0.1.0-rc.4" +zksync_consensus_bft = "=0.1.0-rc.4" +zksync_consensus_crypto = "=0.1.0-rc.4" +zksync_consensus_executor = "=0.1.0-rc.4" +zksync_consensus_network = "=0.1.0-rc.4" +zksync_consensus_roles = "=0.1.0-rc.4" +zksync_consensus_storage = "=0.1.0-rc.4" +zksync_consensus_utils = "=0.1.0-rc.4" +zksync_protobuf = "=0.1.0-rc.4" +zksync_protobuf_build = "=0.1.0-rc.4" # "Local" dependencies -multivm = { path = "core/lib/multivm" } -prometheus_exporter = { path = "core/lib/prometheus_exporter" } -prover_dal = { path = "prover/prover_dal" } -vlog = { path = "core/lib/vlog" } -vm_utils = { path = "core/lib/vm_utils" } -vm-benchmark-harness = { path = "core/tests/vm-benchmark/harness" } -zksync = { path = "sdk/zksync-rs" } -zksync_basic_types = { path = "core/lib/basic_types" } -zksync_circuit_breaker = { path = "core/lib/circuit_breaker" } -zksync_config = { path = "core/lib/config" } -zksync_contract_verifier_lib = { path = "core/lib/contract_verifier" } -zksync_contracts = { path = "core/lib/contracts" } -zksync_core_leftovers = { path = "core/lib/zksync_core_leftovers" } -zksync_crypto = { path = "core/lib/crypto" } -zksync_dal = { path = "core/lib/dal" } -zksync_db_connection = { path = "core/lib/db_connection" } -zksync_env_config = { path = "core/lib/env_config" } -zksync_eth_client = { path = "core/lib/eth_client" } -zksync_eth_signer = { path = "core/lib/eth_signer" } -zksync_health_check = { path = "core/lib/health_check" } -zksync_l1_contract_interface = { path = "core/lib/l1_contract_interface" } -zksync_mempool = { path = "core/lib/mempool" } -zksync_merkle_tree = { path = "core/lib/merkle_tree" } -zksync_mini_merkle_tree = { path = "core/lib/mini_merkle_tree" } -zksync_object_store = { path = "core/lib/object_store" } -zksync_protobuf_config = { path = "core/lib/protobuf_config" } -zksync_prover_interface = { path = "core/lib/prover_interface" } -zksync_queued_job_processor = { path = "core/lib/queued_job_processor" } -zksync_snapshots_applier = { path = "core/lib/snapshots_applier" } -zksync_state = { path = "core/lib/state" } -zksync_storage = { path = "core/lib/storage" } -zksync_system_constants = { path = "core/lib/constants" } -zksync_tee_verifier = { path = "core/lib/tee_verifier" } -zksync_test_account 
= { path = "core/tests/test_account" } -zksync_types = { path = "core/lib/types" } -zksync_utils = { path = "core/lib/utils" } -zksync_web3_decl = { path = "core/lib/web3_decl" } -zksync_crypto_primitives = { path = "core/lib/crypto_primitives" } +zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } +zksync_prover_dal = { version = "0.1.0", path = "prover/crates/lib/prover_dal" } +zksync_vlog = { version = "0.1.0", path = "core/lib/vlog" } +zksync_vm_utils = { version = "0.1.0", path = "core/lib/vm_utils" } +zksync_vm_benchmark_harness = { version = "0.1.0", path = "core/tests/vm-benchmark/harness" } +zksync_basic_types = { version = "0.1.0", path = "core/lib/basic_types" } +zksync_circuit_breaker = { version = "0.1.0", path = "core/lib/circuit_breaker" } +zksync_config = { version = "0.1.0", path = "core/lib/config" } +zksync_contract_verifier_lib = { version = "0.1.0", path = "core/lib/contract_verifier" } +zksync_contracts = { version = "0.1.0", path = "core/lib/contracts" } +zksync_core_leftovers = { version = "0.1.0", path = "core/lib/zksync_core_leftovers" } +zksync_dal = { version = "0.1.0", path = "core/lib/dal" } +zksync_db_connection = { version = "0.1.0", path = "core/lib/db_connection" } +zksync_env_config = { version = "0.1.0", path = "core/lib/env_config" } +zksync_eth_client = { version = "0.1.0", path = "core/lib/eth_client" } +zksync_da_client = { version = "0.1.0", path = "core/lib/da_client" } +zksync_default_da_clients = { version = "0.1.0", path = "core/lib/default_da_clients" } +zksync_eth_signer = { version = "0.1.0", path = "core/lib/eth_signer" } +zksync_health_check = { version = "0.1.0", path = "core/lib/health_check" } +zksync_l1_contract_interface = { version = "0.1.0", path = "core/lib/l1_contract_interface" } +zksync_mempool = { version = "0.1.0", path = "core/lib/mempool" } +zksync_merkle_tree = { version = "0.1.0", path = "core/lib/merkle_tree" } +zksync_mini_merkle_tree = { version = "0.1.0", path = "core/lib/mini_merkle_tree" } +zksync_object_store = { version = "0.1.0", path = "core/lib/object_store" } +zksync_protobuf_config = { version = "0.1.0", path = "core/lib/protobuf_config" } +zksync_prover_interface = { version = "0.1.0", path = "core/lib/prover_interface" } +zksync_queued_job_processor = { version = "0.1.0", path = "core/lib/queued_job_processor" } +zksync_snapshots_applier = { version = "0.1.0", path = "core/lib/snapshots_applier" } +zksync_state = { version = "0.1.0", path = "core/lib/state" } +zksync_storage = { version = "0.1.0", path = "core/lib/storage" } +zksync_system_constants = { version = "0.1.0", path = "core/lib/constants" } +zksync_tee_verifier = { version = "0.1.0", path = "core/lib/tee_verifier" } +zksync_test_account = { version = "0.1.0", path = "core/tests/test_account" } +zksync_types = { version = "0.1.0", path = "core/lib/types" } +zksync_utils = { version = "0.1.0", path = "core/lib/utils" } +zksync_web3_decl = { version = "0.1.0", path = "core/lib/web3_decl" } +zksync_crypto_primitives = { version = "0.1.0", path = "core/lib/crypto_primitives" } +zksync_external_price_api = { version = "0.1.0", path = "core/lib/external_price_api" } # Framework and components -zksync_node_framework = { path = "core/node/node_framework" } -zksync_eth_watch = { path = "core/node/eth_watch" } -zksync_shared_metrics = { path = "core/node/shared_metrics" } -zksync_proof_data_handler = { path = "core/node/proof_data_handler" } -zksync_block_reverter = { path = "core/node/block_reverter" } -zksync_commitment_generator = { path = 
"core/node/commitment_generator" } -zksync_house_keeper = { path = "core/node/house_keeper" } -zksync_node_genesis = { path = "core/node/genesis" } -zksync_eth_sender = { path = "core/node/eth_sender" } -zksync_node_db_pruner = { path = "core/node/db_pruner" } -zksync_node_fee_model = { path = "core/node/fee_model" } -zksync_vm_runner = { path = "core/node/vm_runner" } -zksync_node_test_utils = { path = "core/node/test_utils" } -zksync_state_keeper = { path = "core/node/state_keeper" } -zksync_reorg_detector = { path = "core/node/reorg_detector" } -zksync_consistency_checker = { path = "core/node/consistency_checker" } -zksync_metadata_calculator = { path = "core/node/metadata_calculator" } -zksync_node_sync = { path = "core/node/node_sync" } -zksync_node_consensus = { path = "core/node/consensus" } -zksync_contract_verification_server = { path = "core/node/contract_verification_server" } -zksync_node_api_server = { path = "core/node/api_server" } -zksync_tee_verifier_input_producer = { path = "core/node/tee_verifier_input_producer" } +zksync_node_framework = { version = "0.1.0", path = "core/node/node_framework" } +zksync_node_framework_derive = { version = "0.1.0", path = "core/lib/node_framework_derive" } +zksync_eth_watch = { version = "0.1.0", path = "core/node/eth_watch" } +zksync_shared_metrics = { version = "0.1.0", path = "core/node/shared_metrics" } +zksync_proof_data_handler = { version = "0.1.0", path = "core/node/proof_data_handler" } +zksync_block_reverter = { version = "0.1.0", path = "core/node/block_reverter" } +zksync_commitment_generator = { version = "0.1.0", path = "core/node/commitment_generator" } +zksync_house_keeper = { version = "0.1.0", path = "core/node/house_keeper" } +zksync_node_genesis = { version = "0.1.0", path = "core/node/genesis" } +zksync_da_dispatcher = { version = "0.1.0", path = "core/node/da_dispatcher" } +zksync_eth_sender = { version = "0.1.0", path = "core/node/eth_sender" } +zksync_node_db_pruner = { version = "0.1.0", path = "core/node/db_pruner" } +zksync_node_fee_model = { version = "0.1.0", path = "core/node/fee_model" } +zksync_vm_runner = { version = "0.1.0", path = "core/node/vm_runner" } +zksync_node_test_utils = { version = "0.1.0", path = "core/node/test_utils" } +zksync_state_keeper = { version = "0.1.0", path = "core/node/state_keeper" } +zksync_reorg_detector = { version = "0.1.0", path = "core/node/reorg_detector" } +zksync_consistency_checker = { version = "0.1.0", path = "core/node/consistency_checker" } +zksync_metadata_calculator = { version = "0.1.0", path = "core/node/metadata_calculator" } +zksync_node_sync = { version = "0.1.0", path = "core/node/node_sync" } +zksync_node_storage_init = { version = "0.1.0", path = "core/node/node_storage_init" } +zksync_node_consensus = { version = "0.1.0", path = "core/node/consensus" } +zksync_contract_verification_server = { version = "0.1.0", path = "core/node/contract_verification_server" } +zksync_node_api_server = { version = "0.1.0", path = "core/node/api_server" } +zksync_tee_verifier_input_producer = { version = "0.1.0", path = "core/node/tee_verifier_input_producer" } +zksync_base_token_adjuster = { version = "0.1.0", path = "core/node/base_token_adjuster" } diff --git a/bin/zkt b/bin/zkt new file mode 100755 index 000000000000..337ad5d73953 --- /dev/null +++ b/bin/zkt @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd $(dirname $0) +cd ../zk_toolbox + +cargo install --path ./crates/zk_inception --force +cargo install --path ./crates/zk_supervisor --force diff --git 
a/chains/era/ZkStack.yaml b/chains/era/ZkStack.yaml index 17b307cac4f6..8dbd49c02c67 100644 --- a/chains/era/ZkStack.yaml +++ b/chains/era/ZkStack.yaml @@ -4,6 +4,7 @@ chain_id: 271 prover_version: NoProofs configs: ./chains/era/configs/ rocks_db_path: ./chains/era/db/ +external_node_config_path: ./chains/era/configs/external_node l1_batch_commit_data_generator_mode: Rollup base_token: address: '0x0000000000000000000000000000000000000001' diff --git a/chains/era/configs/.gitkeep b/chains/era/configs/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/checks-config/cspell.json b/checks-config/cspell.json deleted file mode 100644 index bafb5e036d04..000000000000 --- a/checks-config/cspell.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "language": "en", - "ignorePaths": [ - "**/CHANGELOG.md", - "**/node_modules/**", - ".github/**", - ".firebase/**", - ".yarn/**", - "dist/**", - "**/contracts/**", - "**/target/**" - ], - "dictionaries": [ - "typescript", - "cpp", - "npm", - "filetypes", - "cpp", - "en_GB", - "en_US", - "node", - "bash", - "fonts", - "npm", - "cryptocurrencies", - "companies", - "rust", - "html", - "css", - "entities", - "softwareTerms", - "misc", - "fullstack", - "softwareTerms", - "zksync", - "nuxt", - "viem" - ], - "dictionaryDefinitions": [ - { - "name": "zksync", - "addWords": true, - "path": "./era.dic" - } - ], - "allowCompoundWords": true - } \ No newline at end of file diff --git a/checks-config/era.cfg b/checks-config/era.cfg deleted file mode 100644 index c8a6baba820a..000000000000 --- a/checks-config/era.cfg +++ /dev/null @@ -1,69 +0,0 @@ -# Project settings where a Cargo.toml exists and is passed -# ${CARGO_MANIFEST_DIR}/.config/spellcheck.toml - -# Also take into account developer comments -dev_comments = true - -# Skip the README.md file as defined in the cargo manifest -skip_readme = false - -[Hunspell] -# lang and name of `.dic` file -lang = "en_US" -# OS specific additives -# Linux: [ /usr/share/myspell ] -# Windows: [] -# macOS [ /home/alice/Libraries/hunspell, /Libraries/hunspell ] - -# Additional search paths, which take precedence over the default -# os specific search dirs, searched in order, defaults last -search_dirs = ["."] - -# Adds additional dictionaries, can be specified as -# absolute paths or relative in the search dirs (in this order). -# Relative paths are resolved relative to the configuration file -# which is used. -# Refer to `man 5 hunspell` -# or https://www.systutorials.com/docs/linux/man/4-hunspell/#lbAE -# on how to define a custom dictionary file. -extra_dictionaries = ["era.dic"] - -# If set to `true`, the OS specific default search paths -# are skipped and only explicitly specified ones are used. -skip_os_lookups = false - -# Use the builtin dictionaries if none were found in -# in the configured lookup paths. -# Usually combined with `skip_os_lookups=true` -# to enforce the `builtin` usage for consistent -# results across distributions and CI runs. -# Setting this will still use the dictionaries -# specified in `extra_dictionaries = [..]` -# for topic specific lingo. -use_builtin = true - - -[Hunspell.quirks] -# Transforms words that are provided by the tokenizer -# into word fragments based on the capture groups which are to -# be checked. -# If no capture groups are present, the matched word is whitelisted. -transform_regex = ["^'([^\\s])'$", "^[0-9]+x$"] -# Accepts `alphabeta` variants if the checker provides a replacement suggestion -# of `alpha-beta`. 
-allow_concatenation = true -# And the counterpart, which accepts words with dashes, when the suggestion has -# recommendations without the dashes. This is less common. -allow_dashed = false - -[NlpRules] -# Allows the user to override the default included -# exports of LanguageTool, with other custom -# languages - -# override_rules = "/path/to/rules_binencoded.bin" -# override_tokenizer = "/path/to/tokenizer_binencoded.bin" - -[Reflow] -# Reflows doc comments to adhere to a given maximum line width limit. -max_line_length = 80 diff --git a/checks-config/era.dic b/checks-config/era.dic deleted file mode 100644 index 3f4c8fc8fa4f..000000000000 --- a/checks-config/era.dic +++ /dev/null @@ -1,984 +0,0 @@ -42 -<= -=> -== --> -<- -+ -- -* -\ -= -/ -|| -< -> -% -^ -0x00 -0x01 -0x02 -0x20 -~10x -u32 -u64 -u8 -1B -H256 -10e18 -10^9 -2^32 -2^128 -2^24 -10^32 -10^* -2^16 -2^64 -10^8 -U256 -12.5% -5% -10% -20% -*% -90% -1% -f64 -k -M -kb -Gbps -50M -2M -130µs -– -18kb -128kb -10k -100k -120k -800k -24k -500k -50k -52k -260k -120kb -18kb -12GB -20GB -500B -100M -~100us -10ms -1_000ms -1us -~100 -gwei - -ABI -vlog -const -L2 -L2s -L1 -json -l1 -l2 -G1 -G2 -SystemConfig -TODO -se -ZKSYNC_HOME -MultiVMTracer -vm_virtual_blocks -eth_node -EthCall -BaseSystemContracts -eth_calls -refactor -WS -env -url -GasAdjuster -base_fee -base_fee_per_gas -ERC20 -Finalizer -Backoff -middleware -parallelization -precompute -precomputed -Postgres -parallelized -parallelize -job_id -API -APIs -async -pointwise -observability -atomics -integrations -stdout -GCS -websocket -struct -struct's -localhost -TOML -config -finalizer -boolean -prover -timestamp -H160 -ZKsync -AccessList -miniblock -member₁ -member₂ -memberₙ -merkle -eth -Ethereum -deployer -designator -designators -RPC -tx -txs -subtrees -subtree -unfinalizable -meterer -Timedout -bootloader -bootloader's -testkit -Sepolia -Goerli -miniblock -miniblocks -MempoolIO -mempool -latencies -OracleTools -StorageOracle -zk_evm -zkEVM -src -utils -ptr -recurse -RefCell -Rc -StorageView -VM_HOOK_POSITION -VM_HOOKS_PARAMS_COUNT -PAYMASTER_CONTEXT_SLOTS -PrecompilerProcessor -MAX_POSTOP_SLOTS -postOp -type -opcode -KnownCodesStorage -param -HistoryDisabled -HistoryEnabled -sorted_timestamps -known_bytecodes -returndata -namespaces -natively -StateDiffRecord -BYTES_PER_ENUMERATION_INDEX -derived_key -prefill -reorg -precompile -Init -init -enqueued -stage2 -testnets -ethCalls -generable -Serde -tokenize -EOAs -zeroized -cardinality - -// ZKsync-related words -matterlabs -zkweb -zksync -blockchain -zkscan -zkscrypto -PubSub -loadtest -BigUint -radix -state_keeper -MIN_PAYMASTER_BALANCE -PrometheusCollector -RetryCollector -ScriptCollector -MetricsCollector -OperationResultsCollector -ReportCollector -filesystem -hasher -Hasher -grafana -prometheus -serializer -serializable -deserializer -Deserializes -deserializes -serializing -deserializing -deserialization -configs -operation_number -hashed_key -deduplication -mutexes -mutex -Blake2s -Blake2 -web3 -Testnets -miniblock_number -hashed_key -tuples -\x19Ethereum -libzkscrypto -EOA -MultiVM -nonces -fri -rollup -pubkey -JSON -keccak256 -pubdata -timestamps -keccak -musig -len -calldata -DApp -metadata -boojum -deps -Precalculated -precalculated -WASM -DefaultPrecompilesProcessor -LSB -DDoS -refactored -tuple -HistoryMode -vm -VM -VMs -VM's -MSB -Enum -PublishProof -jsrpc -backends -ethsig -ethop -decentralization -rollups -zkrollup -unencrypted -permissionless -trustlessness -IERC -Schnorr -MuSig -Merkle -decentralised -mainchain 
-offchain -processed -zcli -blockchains -sidechain -sidechains -tokenomics -validator -validator's -validator -validators -Validators -CHAINID -PREVRANDAO -ECDSA -EIP712 -EIP1559 -EIPs -eth_estimateGas -eth_call -versa -blake2 -AR16MT -Preimages -EN's -SystemContext -StorageOracle -intrinsics -chunked -chunking -deadbeef01 -deadbeef0 -deadbeef -unsynced -computable -DevEx -Workspace -NFT -preimage -subcalls -hashmaps -monotonicity -subquery -RPCs -programmatically -stdin -stderr -Linter -SmallRng -ZkPorter -StateDiffs -HashMaps -encodings -CTPOP -decommitter -Decommitter -Decommitments -Decommitment -decommitment -decommitments -Decommit -decommit -decommits -DecommiterOracle -DecommitmentProcessor -decommitted -decommit -decommitting -Demuxer -demultiplex -recid -inversed -plux -Binop -Arithmetization -arithmetization -nocapture -Plonky -permissioned -mathbb -Invb -REDC -iszero -skept -ECADD -ECMUL -preds -inttoptr -syncvm -nasm -rodata -ISZERO -JUMPI -ethir -ptrtoint -lshr -getu -zext -noprofile -umin -cccond -ccret -prodm -prodl -prodeh -prodh -interm -signv -ashr -noalias -immediates -prode -StorageBatchInfo -CommitBatchInfo -IExecutor -SetChainId -setChainId -SetChainIdUpgrade -state_transition_manager_contract -prunable -bytea - -// Names -Vyper -stimate -samount -Stichting -Kingsfordweg -RSIN -ABDK -Alef -Zcon -Paypal -Numio -MLTT -USDCs -dapi -validiums -validium -Validium -sharded -pepe -Arweave -Streamr -dutterbutter -NixOS -CLI -SQLx -Rustup -nextest -NTFS -toolchains -toolchain -IDE -M1 -M2 -MacOS -OpenSSL -Xcode -LLVM -nvm -LTS -logout -WSL -orchestrator -TypeScript -Cryptographical -cryptographical -microservices -Executables -subcomponents -v2 -v1 -rmSync -SSL -setup_2^26 -uncomment -toml -GCP -dev -workspace -subcommand -Kubernetes -Etherscan -cryptographic -hashers -MacBook -DDR5 -~ - -// Used libraries -numberish -arrayify -hexlify -markdownlint -ethersproject -nomicfoundation -nomiclabs -Consensys -zkforge -zkcast -Eigen -IPFS - -// Used programming language words -printf -charsets -println -fatalf -allowfullscreen -inttypes -zbin -Panicf -Deri -DERI -Furucombo -kwargs -scaleb -isinstance -RocksDB -mload -secp -porco -rosso -insize -MLOAD -sload -sload -uadd -nocallback -nosync -swrite -Devs -insta -NFTF -yaml - -// ETC -gitter -signup -signups -precompiled -checkmark -Vitalik -Buterin -roadmap -majeure -conveniens -reimplementing -subsecond -supermajority -gemeente -unauthorised -Ethereum's -SDKs -EVM's -EVM -Göerli -ETHUSDC -USDCUSD -ETHUS -USDCUS -ETHUSD -Arbitrum -Adamantium -Immunefi -Winternitz -ewasm -Evmla -UUPS -Uups -TLDR -BLAKE2s -bytes32 -enumeration_index -backend -enum -num_initial -to_check_storage -source_storage -prepend -deduplicated -user_l2_to_l1_logs -L1Messeger -params -provers -zk -substring -reverter -wei -deduplicate -testnet -mainnet -performant -opcodes -USDC -USD -DBs -unexecutable -RLP -DAL -ZKsync's -l2_to_l1 -PoW -coinbase -FIXME -ASC -DESC -versioning -initializer -refactoring -prefetch -unformatted - -// crypto events -Edcon - -// Famous crypto people -Gluchowski -Vitalik's -Buterin's -multisignature -onchain -convertion -Keyhash -Armeabi -scijava -gluk -@Deniallugo's -emilluta - -// Programming related words -backfill -bytecode -bytecodes -impl -subrange -timeframe -leaf_count -mkdir -librocksdb -zksolc -zksyncrobot -precompiles -vyper -zkvyper -undol -applyl -Upgradability -Initializable -Hola -mundo -ISTN -Zerion -Maverik -zk_evm_1_3_3 -vk -vks -CORS -verifier -crypto -callee -Subcalls -Vec -vec -vecs -L1Messenger -SystemL2ToL1Log 
-witness_inputs -StateKeeper -enum_index -virtual_block_start_batch -virtual_block_finish_l2_block -base_token_address -maxFeePerGas -maxPriorityFeePerGas -structs -all_circuit -OversizedData -M5 -eth_sign -geth -reth -ethers -js -recovery_id -&self -ETHSignature -recover_signer -BlockNumber -(de) -{result -DebugCall} -CREATE2 -memtables -memtable -PostgreSQL -OneTx -DefaultTracer -Tx1 -Tx2 -TxN -VmStopped -Unversioned -versioned -l2_block -submodule -enums -deserialized -deserialize -hashmap -vm_m5 -SDK -1M -dir -SSD -getter -Getters -WebSocket -gasLimit -MiBs -MiB -GiB -GiBs -pubsub -\x19Ethereum -nibbles–node -ZkSyncTree -invariants -LEB128 -workflow -L1Batch -runtime -Tokio -Blobstore -S3 -AWS -ExternalIO -ClosedFormInputWrapper -AggregationWrapper -(de)serializer -typesafe -LRU -ns -Q3 -loadnext -args -with_arg -node_aggregation_job -scheduler_job -leaf_aggregation_job -MAX_ATTEMPTs -fsync -TEST_DATABASE_URL -newest_block -block_count -contracts_verification_info -RNG -jsonrpsee -l1_batch -Namespace -ExecutionStatus -VmStarted -reproducibility -CFs -key–value -enum_index_migration_cursor -block_number -initial_writes -errored -FactoryDeps -de -StorageView's -Yul -eth_txs -eth_tx -ExecuteBlock -PublishProofBlocksOnchain -CommitBlocks -entrypoint -gas_limit -TxSender -UX -BasicWitnessInputProducer -eth_tx_history -PENDING_BLOCK -from_block -namespace -PriorityQueue -Görli -Ropsten -Rinkeby -tokio -threadpool -IntrinsicGas -InsufficientFundsForTransfer -ChainId -hyperchains -eth_getLogs -façade -virtual_blocks_per_miniblock -virtual_block_interval -max_overhead -total_gas_limit -cloneable -timestamped -healthcheck -Healthcheck -HealthCheck -readonly -upgrader -startup -BFT -PingCAP -witgen -ok -hacky -ceil -Infura -synth -proto - -AUTOGENERATED -x19Ethereum -block_timestamp -SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER -MAX_L2_TX_GAS_LIMIT -MAX_TX_ERGS_LIMIT -OneTxTracer -multicall -Multicall's -Multicall3 -proxied -scalers -updatable -instantiation -unexecuted -transactional -benchmarking -virtual_blocks_interval -dal -codebase -compactions -M6 -compiler_common -noop -tokenized -rustc -sqlx -zkevm -Boojum -Sepolia -psql -Cuda -cuda -hdcaa -impls -abda -edaf -unsynchronized -CUDA -gcloud -NVME -OTLP -multiVM -Deduplicator -lobkc -sread -myfunction -merklelization -beaf -subcalls -unallowed -Nuxt -Merklized -satisfiability -demultiplex -precompile -statekeeper -matchers -lifecycle -dedup -deduped -crаsh -protobuf -L1Tx -EIP -DecommittmentProcessor -decommitment -tokenized -Aggregator -DecommittmentProcessor -decommitment -hardcoded -plookup -shivini -EIP4844 -KZG -secp256k1 -vendoring -publickey -keypair -Electrum -healthcheck -healthchecks -after_node_shutdown -runnable -downcasting -parameterized -reimplementation -composability -md5 -shivini -balancer -lookups -stateful -getPubdataPricingMode -Uint -implementors -WIP -oneshot -p2p -StorageProcessor -StorageMarker -SIGINT -opentelemetry -PubdataSendingMode -FriGpuProverArchiver -vm -demuxer -2k -4k -superset -80M -780kb -None -Nones -evm_simulator_code_hash -pubdata_costs -storage_refunds -state_keeper's -witness_generator -arity -recursion_tip -RECURSION_TIP_ARITY -empty_proof -hyperchain -storages -vec -zksync_merkle_tree -TreeMetadata -delegator -decrement -whitelisted -Bbellman -Sbellman -DCMAKE -preloaded -e2e -upcasting -foundryup -uncached -untrimmed -UNNEST -semver -TeeRequestProcessor -l1_batch_number -RequestProcessorError -map_err -proof_inputs -submit_proofs -ready_to_be_proven -privkey diff --git a/checks-config/links.json 
b/checks-config/links.json deleted file mode 100644 index b18b9608f16f..000000000000 --- a/checks-config/links.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "ignorePatterns": [ - { - "pattern": "^https://github\\.com/matter-labs/zksync-2-dev/" - }, - { - "pattern": "^https://www\\.notion\\.so/" - }, - { - "pattern": "^https://github\\.com/matter-labs/zksync-era/compare/" - }, - { - "pattern": "^https://twitter\\.com/zksync" - }, - { - "pattern": "^https://twitter\\.com/zkSyncDevs" - }, - { - "pattern": "^https://github\\.com/matter-labs/zk_evm" - }, - { - "pattern": "^https://sepolia\\.etherscan\\.io/tx/0x18c2a113d18c53237a4056403047ff9fafbf772cb83ccd44bb5b607f8108a64c" - }, - { - "pattern": "^https://github\\.com/matter-labs/zksync-era/commit/" - }, - { - "pattern": "^https://github\\.com/matter-labs//era-compiler-llvm" - } - ], - "aliveStatusCodes": [0, 200, 206, 304] -} diff --git a/contracts b/contracts index 8a70bbbc4812..c863a6592293 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 8a70bbbc48125f5bde6189b4e3c6a3ee79631678 +Subproject commit c863a659229319966c55cf7e66cd6542c6da9899 diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 608af4d9b010..d9a944c7efe3 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,183 @@ # Changelog +## [24.11.0](https://github.com/matter-labs/zksync-era/compare/core-v24.10.0...core-v24.11.0) (2024-07-23) + + +### Features + +* add revert tests (external node) to zk_toolbox ([#2408](https://github.com/matter-labs/zksync-era/issues/2408)) ([3fbbee1](https://github.com/matter-labs/zksync-era/commit/3fbbee10be99e8c5a696bfd50d81230141bccbf4)) +* add state override for gas estimates ([#1358](https://github.com/matter-labs/zksync-era/issues/1358)) ([761bda1](https://github.com/matter-labs/zksync-era/commit/761bda19844fb3935f8a57c47df39010f88ef9dc)) +* added consensus_config to general config ([#2462](https://github.com/matter-labs/zksync-era/issues/2462)) ([c5650a4](https://github.com/matter-labs/zksync-era/commit/c5650a4f1747f59d7a2d4e1986a91ae3fa7d75b0)) +* added key generation command to EN ([#2461](https://github.com/matter-labs/zksync-era/issues/2461)) ([9861415](https://github.com/matter-labs/zksync-era/commit/986141562646c4d96dca205593e48e4d8df46fba)) +* remove leftovers after BWIP ([#2456](https://github.com/matter-labs/zksync-era/issues/2456)) ([990676c](https://github.com/matter-labs/zksync-era/commit/990676c5f84afd2ff8cd337f495c82e8d1f305a4)) + +## [24.10.0](https://github.com/matter-labs/zksync-era/compare/core-v24.9.0...core-v24.10.0) (2024-07-22) + + +### Features + +* Add blob size metrics ([#2411](https://github.com/matter-labs/zksync-era/issues/2411)) ([41c535a](https://github.com/matter-labs/zksync-era/commit/41c535af2bcc72000116277d5dd9e04b5c0b2372)) +* **en:** Switch EN to use node framework ([#2427](https://github.com/matter-labs/zksync-era/issues/2427)) ([0cee530](https://github.com/matter-labs/zksync-era/commit/0cee530b2f2e8304b7e20a093a32abe116463b57)) +* **eth-sender:** add early return in sending new transactions to not spam logs with errors ([#2425](https://github.com/matter-labs/zksync-era/issues/2425)) ([192f2a3](https://github.com/matter-labs/zksync-era/commit/192f2a374d83eaecb52f198fdcfa615262378530)) +* **eth-watch:** Integrate decentralized upgrades ([#2401](https://github.com/matter-labs/zksync-era/issues/2401)) ([5a48e10](https://github.com/matter-labs/zksync-era/commit/5a48e1026260024c6ae2b4d1100ee9b798a83e8d)) +* L1 batch signing (BFT-474) 
([#2414](https://github.com/matter-labs/zksync-era/issues/2414)) ([ab699db](https://github.com/matter-labs/zksync-era/commit/ab699dbe8cffa8bd291d6054579061b47fd4aa0e)) +* **prover:** Make it possible to run prover out of GCP ([#2448](https://github.com/matter-labs/zksync-era/issues/2448)) ([c9da549](https://github.com/matter-labs/zksync-era/commit/c9da5497e2aa9d85f204ab7b74fefcfe941793ff)) +* **zk_toolbox:** Small adjustment for zk toolbox ([#2424](https://github.com/matter-labs/zksync-era/issues/2424)) ([ce43c42](https://github.com/matter-labs/zksync-era/commit/ce43c422fddccfe88c07ee22a2b8726dd0bd5f61)) + + +### Bug Fixes + +* **eth-sender:** add bump of min 10% when resending txs to avoid "replacement transaction underpriced" ([#2422](https://github.com/matter-labs/zksync-era/issues/2422)) ([a7bcf5d](https://github.com/matter-labs/zksync-era/commit/a7bcf5d7f75eb45384312d7c97f25a50a91e7a31)) +* Set attesters in Connection::adjust_genesis (BFT-489) ([#2429](https://github.com/matter-labs/zksync-era/issues/2429)) ([ca4cb3c](https://github.com/matter-labs/zksync-era/commit/ca4cb3cba04757dc1760397c667a838931cd2d11)) + +## [24.9.0](https://github.com/matter-labs/zksync-era/compare/core-v24.8.0...core-v24.9.0) (2024-07-10) + + +### Features + +* add block timestamp to `eth_getLogs` ([#2374](https://github.com/matter-labs/zksync-era/issues/2374)) ([50422b8](https://github.com/matter-labs/zksync-era/commit/50422b897d2b0fdbb82f1c4cdb97c1a39ace02c7)) +* add revert tests to zk_toolbox ([#2317](https://github.com/matter-labs/zksync-era/issues/2317)) ([c9ad002](https://github.com/matter-labs/zksync-era/commit/c9ad002d17ed91d1e5f225e19698c12cb3adc665)) +* add zksync_tee_prover and container to nix ([#2403](https://github.com/matter-labs/zksync-era/issues/2403)) ([e0975db](https://github.com/matter-labs/zksync-era/commit/e0975db317ae7934ce47b5267790b696fc9a1113)) +* Adding unstable RPC endpoint to return the execution_info ([#2332](https://github.com/matter-labs/zksync-era/issues/2332)) ([3d047ea](https://github.com/matter-labs/zksync-era/commit/3d047ea953d6fed4d0463fce60f743086f4a13b9)) +* **api:** Retry `read_value` ([#2352](https://github.com/matter-labs/zksync-era/issues/2352)) ([256a43c](https://github.com/matter-labs/zksync-era/commit/256a43cdd01619b89e348419bc361454ba4fdabb)) +* Base Token Fundamentals ([#2204](https://github.com/matter-labs/zksync-era/issues/2204)) ([39709f5](https://github.com/matter-labs/zksync-era/commit/39709f58071ac77bfd447145e1c3342b7da70560)) +* **base-token:** Base token price ratio cache update frequency configurable ([#2388](https://github.com/matter-labs/zksync-era/issues/2388)) ([fb4d700](https://github.com/matter-labs/zksync-era/commit/fb4d7008db919281f7a328c0baaaa5b93c5166c1)) +* BWIP ([#2258](https://github.com/matter-labs/zksync-era/issues/2258)) ([75bdfcc](https://github.com/matter-labs/zksync-era/commit/75bdfcc0ef4a99d93ac152db12a59ef2b2af0d27)) +* **config:** Make getaway_url optional ([#2412](https://github.com/matter-labs/zksync-era/issues/2412)) ([200bc82](https://github.com/matter-labs/zksync-era/commit/200bc825032b18ad9d8f3f49d4eb7cb0e1b5b645)) +* consensus support for pruning (BFT-473) ([#2334](https://github.com/matter-labs/zksync-era/issues/2334)) ([abc4256](https://github.com/matter-labs/zksync-era/commit/abc4256570b899e2b47ed8362e69ae0150247490)) +* **contract-verifier:** Add file based config for contract verifier ([#2415](https://github.com/matter-labs/zksync-era/issues/2415)) 
([f4410e3](https://github.com/matter-labs/zksync-era/commit/f4410e3254dafdfe400e1c2c420f664ba951e2cd)) +* **en:** file based configs for en ([#2110](https://github.com/matter-labs/zksync-era/issues/2110)) ([7940fa3](https://github.com/matter-labs/zksync-era/commit/7940fa32a27ee4de43753c7083f92ca8c2ebe86b)) +* **en:** Unify snapshot recovery and recovery from L1 ([#2256](https://github.com/matter-labs/zksync-era/issues/2256)) ([e03a929](https://github.com/matter-labs/zksync-era/commit/e03a9293852288b36d23f5ccbc784876435dd18d)) +* **eth-sender:** Add transient ethereum gateway errors metric ([#2323](https://github.com/matter-labs/zksync-era/issues/2323)) ([287958d](https://github.com/matter-labs/zksync-era/commit/287958db6ca54959fd56c04d4a7a3cbfc9baa877)) +* **eth-sender:** handle transactions for different operators separately to increase throughput ([#2341](https://github.com/matter-labs/zksync-era/issues/2341)) ([0619ecc](https://github.com/matter-labs/zksync-era/commit/0619eccc335311298bfc0c75f0a4bf8562db759e)) +* **eth-sender:** separate gas calculations for blobs transactions ([#2247](https://github.com/matter-labs/zksync-era/issues/2247)) ([627aab9](https://github.com/matter-labs/zksync-era/commit/627aab9703c47795247f8b6d21533520498ed025)) +* **gas_adjuster:** Use eth_feeHistory for both base fee and blobs ([#2322](https://github.com/matter-labs/zksync-era/issues/2322)) ([9985c26](https://github.com/matter-labs/zksync-era/commit/9985c2659177656788a1f6143120eafccfccdae9)) +* L1 batch QC database (BFT-476) ([#2340](https://github.com/matter-labs/zksync-era/issues/2340)) ([5886b8d](https://github.com/matter-labs/zksync-era/commit/5886b8df304ded15104ec228e0477bc5f44b7fbe)) +* **metadata-calculator:** option to use VM runner for protective reads ([#2318](https://github.com/matter-labs/zksync-era/issues/2318)) ([c147b0c](https://github.com/matter-labs/zksync-era/commit/c147b0c68e6e1db5bd658c4f7a591bf3cddb9417)) +* Minimal External API Fetcher ([#2383](https://github.com/matter-labs/zksync-era/issues/2383)) ([9f255c0](https://github.com/matter-labs/zksync-era/commit/9f255c073cfdab60832fcf9a6d3a4a9258641ef3)) +* **node_framework:** Document implementations ([#2319](https://github.com/matter-labs/zksync-era/issues/2319)) ([7b3877f](https://github.com/matter-labs/zksync-era/commit/7b3877fd35b5c894fbe18666953eace8910dba0c)) +* **node_framework:** Implement FromContext and IntoContext derive macro ([#2330](https://github.com/matter-labs/zksync-era/issues/2330)) ([34f2a45](https://github.com/matter-labs/zksync-era/commit/34f2a45e073052519697f41f264d05fa187ea678)) +* **node_framework:** Support shutdown hooks + more ([#2293](https://github.com/matter-labs/zksync-era/issues/2293)) ([2b2c790](https://github.com/matter-labs/zksync-era/commit/2b2c790b64beb59a885ce785ab01d5c1bd089c43)) +* **node_framework:** Unify Task types + misc improvements ([#2325](https://github.com/matter-labs/zksync-era/issues/2325)) ([298a97e](https://github.com/matter-labs/zksync-era/commit/298a97e800b4c156628050789de7a490a7565d60)) +* **node-framework:** New wiring interface ([#2384](https://github.com/matter-labs/zksync-era/issues/2384)) ([f2f4056](https://github.com/matter-labs/zksync-era/commit/f2f405669ec9f6edd3f2d5e5c1248582c5962ae8)) +* **prover:** Add prometheus port to witness generator config ([#2385](https://github.com/matter-labs/zksync-era/issues/2385)) ([d0e1add](https://github.com/matter-labs/zksync-era/commit/d0e1addfccf6b5d3b21facd6bb74455f098f0177)) +* **prover:** Add prover_cli stats command
([#2362](https://github.com/matter-labs/zksync-era/issues/2362)) ([fe65319](https://github.com/matter-labs/zksync-era/commit/fe65319da0f26ca45e95f067c1e8b97cf7874c45)) +* **snapshots_applier:** Add a method to check whether snapshot recovery is done ([#2338](https://github.com/matter-labs/zksync-era/issues/2338)) ([610a7cf](https://github.com/matter-labs/zksync-era/commit/610a7cf037c6c655564deffebbf5a3fe5533783b)) +* Switch to using crates.io deps ([#2409](https://github.com/matter-labs/zksync-era/issues/2409)) ([27fabaf](https://github.com/matter-labs/zksync-era/commit/27fabafbec66bf4cb65c4fa9e3fab4c3c981d0f2)) +* **tee:** add Prometheus metrics to the TEE Prover ([#2386](https://github.com/matter-labs/zksync-era/issues/2386)) ([6153e99](https://github.com/matter-labs/zksync-era/commit/6153e9956065bfb04b94cc909315a6f1b6fdd364)) +* **tee:** TEE Prover Gateway ([#2333](https://github.com/matter-labs/zksync-era/issues/2333)) ([f8df34d](https://github.com/matter-labs/zksync-era/commit/f8df34d9bff5e165fe40d4f67afa582a84038303)) +* Unify and port node storage initialization ([#2363](https://github.com/matter-labs/zksync-era/issues/2363)) ([8ea9791](https://github.com/matter-labs/zksync-era/commit/8ea979171e56af20c779e08fb2c55be30f655149)) +* Validium with DA ([#2010](https://github.com/matter-labs/zksync-era/issues/2010)) ([fe03d0e](https://github.com/matter-labs/zksync-era/commit/fe03d0e254a98fea60ecb7485a7de9e7fdecaee1)) +* **vm-runner:** make vm runner report time taken ([#2369](https://github.com/matter-labs/zksync-era/issues/2369)) ([275a333](https://github.com/matter-labs/zksync-era/commit/275a3337840c6722c2cd16241c785ff507da4521)) +* **zk toolbox:** External node support ([#2287](https://github.com/matter-labs/zksync-era/issues/2287)) ([6384cad](https://github.com/matter-labs/zksync-era/commit/6384cad26aead4d1bdbb606a97d623dacebf912c)) +* **zk_toolbox:** Add prover init command ([#2298](https://github.com/matter-labs/zksync-era/issues/2298)) ([159af3c](https://github.com/matter-labs/zksync-era/commit/159af3c54cc9beb742b2ab43ce3b89b14c8368b7)) + + +### Bug Fixes + +* **api:** fix log timestamp format ([#2407](https://github.com/matter-labs/zksync-era/issues/2407)) ([e9d63db](https://github.com/matter-labs/zksync-era/commit/e9d63dbe357a07fb07c7d35389b99e7b1ae47402)) +* BWIP race condition ([#2405](https://github.com/matter-labs/zksync-era/issues/2405)) ([8099ab0](https://github.com/matter-labs/zksync-era/commit/8099ab0b77da3168a4184611adecb98a7d32fbaa)) +* **config:** Implement proper tests ([#2381](https://github.com/matter-labs/zksync-era/issues/2381)) ([2ec494b](https://github.com/matter-labs/zksync-era/commit/2ec494bf6917bbce8a6e4e0c61ad77bf006815ec)) +* **db:** Fix / extend transaction isolation levels ([#2350](https://github.com/matter-labs/zksync-era/issues/2350)) ([404ceb9](https://github.com/matter-labs/zksync-era/commit/404ceb91e9a179c269baed4d218261aae48a8061)) +* **en:** Fix panics when queuing sync actions during shutdown ([d5935c7](https://github.com/matter-labs/zksync-era/commit/d5935c77b1496f24b829fe8e7f1c019ec6848db0)) +* **erc20-test:** only approving baseToken allowance when needed ([#2379](https://github.com/matter-labs/zksync-era/issues/2379)) ([087a3c4](https://github.com/matter-labs/zksync-era/commit/087a3c4d01992c2173eb35ada24c63f290ef6140)) +* **eth-sender:** confirm eth-txs in order of their creation ([#2310](https://github.com/matter-labs/zksync-era/issues/2310)) 
([31a1a04](https://github.com/matter-labs/zksync-era/commit/31a1a04183c213cf1270e1487e05d6f9548c0afd)) +* **eth-sender:** fix query returning inflight txs ([#2404](https://github.com/matter-labs/zksync-era/issues/2404)) ([6a89ca0](https://github.com/matter-labs/zksync-era/commit/6a89ca077c02c1d1bba511409d4e4196642205a6)) +* **eth-sender:** missing fix in second query calculating txs unsent txs ([#2406](https://github.com/matter-labs/zksync-era/issues/2406)) ([948b532](https://github.com/matter-labs/zksync-era/commit/948b532ff4c94a80689e7906791d03cef64e3804)) +* **eth-sender:** revert commit changing which type of txs we resend first ([#2327](https://github.com/matter-labs/zksync-era/issues/2327)) ([ef75292](https://github.com/matter-labs/zksync-era/commit/ef752926691d768ea412d0fdc78f43a62f16cd15)) +* Fix rustls setup for jsonrpsee clients ([#2417](https://github.com/matter-labs/zksync-era/issues/2417)) ([a040f09](https://github.com/matter-labs/zksync-era/commit/a040f099cd9863d47d49cbdb3360e53a82e0423e)) +* **merkle-tree:** Change `LazyAsyncTreeReader::wait()` signature ([#2314](https://github.com/matter-labs/zksync-era/issues/2314)) ([408393c](https://github.com/matter-labs/zksync-era/commit/408393c7d8ceee0ae95cbc1f2b24a3375e345e97)) +* **merkle-tree:** Fix chunk recovery reporting during tree recovery ([#2348](https://github.com/matter-labs/zksync-era/issues/2348)) ([70b3a8a](https://github.com/matter-labs/zksync-era/commit/70b3a8aea33820d5bf932b608c9e68ecc2915d4c)) +* **merkle-tree:** Fix connection timeouts during tree pruning ([#2372](https://github.com/matter-labs/zksync-era/issues/2372)) ([d5935c7](https://github.com/matter-labs/zksync-era/commit/d5935c77b1496f24b829fe8e7f1c019ec6848db0)) +* **object-store:** Consider some token source errors transient ([#2331](https://github.com/matter-labs/zksync-era/issues/2331)) ([85386d3](https://github.com/matter-labs/zksync-era/commit/85386d314a934b7eaa0bf2707f6d5af039e93340)) +* **tee:** Introduce a 1 second delay in the batch poll ([#2398](https://github.com/matter-labs/zksync-era/issues/2398)) ([312defe](https://github.com/matter-labs/zksync-era/commit/312defed86fbbbc1dfee489be373af1417ee624a)) +* **vm-runner:** change `processing_started_at` column type to `timestamp` ([#2397](https://github.com/matter-labs/zksync-era/issues/2397)) ([4221155](https://github.com/matter-labs/zksync-era/commit/4221155d7f7467a1a8d57c4cbb8f1d9de3bac9e3)) + + +### Reverts + +* "refactor: Rename consensus tasks and split storage (BFT-476)" ([#2364](https://github.com/matter-labs/zksync-era/issues/2364)) ([e67ec5d](https://github.com/matter-labs/zksync-era/commit/e67ec5de15d01a0edce741efd6f5fe126ce76290)) + +## [24.8.0](https://github.com/matter-labs/zksync-era/compare/core-v24.7.0...core-v24.8.0) (2024-06-24) + + +### ⚠ BREAKING CHANGES + +* updated boojum and nightly rust compiler ([#2126](https://github.com/matter-labs/zksync-era/issues/2126)) + +### Features + +* Add metrics for transaction execution result in state keeper ([#2021](https://github.com/matter-labs/zksync-era/issues/2021)) ([dde0fc4](https://github.com/matter-labs/zksync-era/commit/dde0fc4b469474525fd5e4fd1594c3710d6d91f5)) +* **api:** Add new `l1_committed` block tag ([#2282](https://github.com/matter-labs/zksync-era/issues/2282)) ([d5e8e9b](https://github.com/matter-labs/zksync-era/commit/d5e8e9bc66ff38b828730b62d8a7b8794cb1758a)) +* **api:** Rework zks_getProtocolVersion ([#2146](https://github.com/matter-labs/zksync-era/issues/2146)) 
([800b8f4](https://github.com/matter-labs/zksync-era/commit/800b8f456282685e81d3423ba3e27d017db2f183)) +* change `zkSync` occurrences to `ZKsync` ([#2227](https://github.com/matter-labs/zksync-era/issues/2227)) ([0b4104d](https://github.com/matter-labs/zksync-era/commit/0b4104dbb996ec6333619ea05f3a99e6d4f3b8fa)) +* **contract-verifier:** Adjust contract verifier for zksolc 1.5.0 ([#2255](https://github.com/matter-labs/zksync-era/issues/2255)) ([63efb2e](https://github.com/matter-labs/zksync-era/commit/63efb2e530d8b1445bdd58537d6f0cdb5593cd75)) +* **docs:** Add documentation for subset of wiring layer implementations, used by Main node ([#2292](https://github.com/matter-labs/zksync-era/issues/2292)) ([06c287b](https://github.com/matter-labs/zksync-era/commit/06c287b630707843fd92cb88f899a8fd1dcc7147)) +* **docs:** Pruning and Snapshots recovery basic docs ([#2265](https://github.com/matter-labs/zksync-era/issues/2265)) ([619a525](https://github.com/matter-labs/zksync-era/commit/619a525bc8f1098297259ddb296b4b5dee223944)) +* **en:** Allow recovery from specific snapshot ([#2137](https://github.com/matter-labs/zksync-era/issues/2137)) ([ac61fed](https://github.com/matter-labs/zksync-era/commit/ac61fedb5756ed700e35f231a364b9c933423ab8)) +* **eth-sender:** fix for missing eth_txs_history entries ([#2236](https://github.com/matter-labs/zksync-era/issues/2236)) ([f05b0ae](https://github.com/matter-labs/zksync-era/commit/f05b0aefbb04ce715431bf039b8760e95f87dc93)) +* Expose fair_pubdata_price for blocks and batches ([#2244](https://github.com/matter-labs/zksync-era/issues/2244)) ([0d51cd6](https://github.com/matter-labs/zksync-era/commit/0d51cd6f3e65eef1bda981fe96f3026d8e12156d)) +* **merkle-tree:** Rework tree rollback ([#2207](https://github.com/matter-labs/zksync-era/issues/2207)) ([c3b9c38](https://github.com/matter-labs/zksync-era/commit/c3b9c38ca07f01e6f7b2d7e631b2b811cacecf3a)) +* **node-framework:** Add Main Node Client layer ([#2132](https://github.com/matter-labs/zksync-era/issues/2132)) ([927d842](https://github.com/matter-labs/zksync-era/commit/927d8427e05b6d1a3aa9a63ee8e0db4fb1b82094)) +* **node:** Move some stuff around ([#2151](https://github.com/matter-labs/zksync-era/issues/2151)) ([bad5a6c](https://github.com/matter-labs/zksync-era/commit/bad5a6c0ec2e166235418a2796b6ccf6f8b3b05f)) +* **node:** Port (most of) Node to the Node Framework ([#2196](https://github.com/matter-labs/zksync-era/issues/2196)) ([7842bc4](https://github.com/matter-labs/zksync-era/commit/7842bc4842c5c92437639105d8edac5f775ad0e6)) +* **object-store:** Allow caching object store objects locally ([#2153](https://github.com/matter-labs/zksync-era/issues/2153)) ([6c6e65c](https://github.com/matter-labs/zksync-era/commit/6c6e65ce646bcb4ed9ba8b2dd6be676bb6e66324)) +* **proof_data_handler:** add new endpoints to the TEE prover interface API ([#1993](https://github.com/matter-labs/zksync-era/issues/1993)) ([eca98cc](https://github.com/matter-labs/zksync-era/commit/eca98cceeb74a979040279caaf1d05d1fdf1b90c)) +* **prover:** Add file based config for fri prover gateway ([#2150](https://github.com/matter-labs/zksync-era/issues/2150)) ([81ffc6a](https://github.com/matter-labs/zksync-era/commit/81ffc6a753fb72747c01ddc8a37211bf6a8a1a27)) +* Remove initialize_components function ([#2284](https://github.com/matter-labs/zksync-era/issues/2284)) ([0a38891](https://github.com/matter-labs/zksync-era/commit/0a388911914bfcf58785e394db9d5ddce3afdef0)) +* **state-keeper:** Add metric for l2 block seal reason
([#2229](https://github.com/matter-labs/zksync-era/issues/2229)) ([f967e6d](https://github.com/matter-labs/zksync-era/commit/f967e6d20bb7f9192af08e5040c58af97585862d)) +* **state-keeper:** More state keeper metrics ([#2224](https://github.com/matter-labs/zksync-era/issues/2224)) ([1e48cd9](https://github.com/matter-labs/zksync-era/commit/1e48cd99a0e5ea8bedff91135938dbbb70141d43)) +* **sync-layer:** adapt MiniMerkleTree to manage priority queue ([#2068](https://github.com/matter-labs/zksync-era/issues/2068)) ([3e72364](https://github.com/matter-labs/zksync-era/commit/3e7236494e346324fe1254038632ee005e0083e5)) +* **tee_verifier_input_producer:** use `FactoryDepsDal::get_factory_deps()` ([#2271](https://github.com/matter-labs/zksync-era/issues/2271)) ([2c0a00a](https://github.com/matter-labs/zksync-era/commit/2c0a00add179cc4ed521bbb9d616b8828f0ad3c1)) +* **toolbox:** add zk_toolbox ci ([#1985](https://github.com/matter-labs/zksync-era/issues/1985)) ([4ab4922](https://github.com/matter-labs/zksync-era/commit/4ab492201a1654a254c0b14a382a2cb67e3cb9e5)) +* updated boojum and nightly rust compiler ([#2126](https://github.com/matter-labs/zksync-era/issues/2126)) ([9e39f13](https://github.com/matter-labs/zksync-era/commit/9e39f13c29788e66645ea57f623555c4b36b8aff)) +* upgraded encoding of transactions in consensus Payload. ([#2245](https://github.com/matter-labs/zksync-era/issues/2245)) ([cb6a6c8](https://github.com/matter-labs/zksync-era/commit/cb6a6c88de54806d0f4ae4af7ea873a911605780)) +* Use info log level for crates named zksync_* by default ([#2296](https://github.com/matter-labs/zksync-era/issues/2296)) ([9303142](https://github.com/matter-labs/zksync-era/commit/9303142de5e6af3da69fa836a7e537287bdde4b0)) +* verification of L1Batch witness (BFT-471) - attempt 2 ([#2232](https://github.com/matter-labs/zksync-era/issues/2232)) ([dbcf3c6](https://github.com/matter-labs/zksync-era/commit/dbcf3c6d02a6bfb9197bf4278f296632b0fd7d66)) +* verification of L1Batch witness (BFT-471) ([#2019](https://github.com/matter-labs/zksync-era/issues/2019)) ([6cc5455](https://github.com/matter-labs/zksync-era/commit/6cc54555972804be4cd2ca118f0e425c490fbfca)) +* **vm-runner:** add basic metrics ([#2203](https://github.com/matter-labs/zksync-era/issues/2203)) ([dd154f3](https://github.com/matter-labs/zksync-era/commit/dd154f388c23ff67068a1053fec878e80ba9bd17)) +* **vm-runner:** add protective reads persistence flag for state keeper ([#2307](https://github.com/matter-labs/zksync-era/issues/2307)) ([36d2eb6](https://github.com/matter-labs/zksync-era/commit/36d2eb651a583293a5103dc990813e74e8532f52)) +* **vm-runner:** shadow protective reads using VM runner ([#2017](https://github.com/matter-labs/zksync-era/issues/2017)) ([1402dd0](https://github.com/matter-labs/zksync-era/commit/1402dd054e3248de55bcc6899bb58a2cfe900473)) + + +### Bug Fixes + +* **api:** Fix getting pending block ([#2186](https://github.com/matter-labs/zksync-era/issues/2186)) ([93315ba](https://github.com/matter-labs/zksync-era/commit/93315ba95c54bd0730c964998bfc0c64080b3c04)) +* **api:** Fix transaction methods for pruned transactions ([#2168](https://github.com/matter-labs/zksync-era/issues/2168)) ([00c4cca](https://github.com/matter-labs/zksync-era/commit/00c4cca1635e6cd17bbc74e7841f47ead7f8e445)) +* **config:** Fix object store ([#2183](https://github.com/matter-labs/zksync-era/issues/2183)) ([551cdc2](https://github.com/matter-labs/zksync-era/commit/551cdc2da38dbd2ca1f07e9a49f9f2745f21556a)) +* **config:** Split object stores
([#2187](https://github.com/matter-labs/zksync-era/issues/2187)) ([9bcdabc](https://github.com/matter-labs/zksync-era/commit/9bcdabcaa8462ae19da1688052a7a78fa4108298)) +* **db:** Fix `insert_proof_generation_details()` ([#2291](https://github.com/matter-labs/zksync-era/issues/2291)) ([c2412cf](https://github.com/matter-labs/zksync-era/commit/c2412cf2421448c706a08e3c8fda3b0af6aac497)) +* **db:** Optimize `get_l2_blocks_to_execute_for_l1_batch` ([#2199](https://github.com/matter-labs/zksync-era/issues/2199)) ([06ec5f3](https://github.com/matter-labs/zksync-era/commit/06ec5f3e6bb66025a3ec1e5b4d314c7ff1e116c7)) +* **en:** Fix reorg detection in presence of tree data fetcher ([#2197](https://github.com/matter-labs/zksync-era/issues/2197)) ([20da566](https://github.com/matter-labs/zksync-era/commit/20da5668a42a11cc0ea07f9d1a5d5c39e32ce3b4)) +* **en:** Fix transient error detection in consistency checker ([#2140](https://github.com/matter-labs/zksync-era/issues/2140)) ([38fdfe0](https://github.com/matter-labs/zksync-era/commit/38fdfe083f61f5aad11b5a0efb41215c674f3186)) +* **en:** Remove L1 client health check ([#2136](https://github.com/matter-labs/zksync-era/issues/2136)) ([49198f6](https://github.com/matter-labs/zksync-era/commit/49198f695a93d24a5e2d37a24b2c5e1b6c70b9c5)) +* **eth-sender:** Don't resend already sent transactions in the same block ([#2208](https://github.com/matter-labs/zksync-era/issues/2208)) ([3538e9c](https://github.com/matter-labs/zksync-era/commit/3538e9c346ef7bacf62fd76874d41548a4be46ea)) +* **eth-sender:** better error handling in eth-sender ([#2163](https://github.com/matter-labs/zksync-era/issues/2163)) ([0cad504](https://github.com/matter-labs/zksync-era/commit/0cad504b1c40399a24b604c3454ae4ab98550ad6)) +* **node_framework:** Run gas adjuster task only if necessary ([#2266](https://github.com/matter-labs/zksync-era/issues/2266)) ([2dac846](https://github.com/matter-labs/zksync-era/commit/2dac8463376b5ca7cb3aeefab83b9220f3b2466a)) +* **object-store:** Consider more GCS errors transient ([#2246](https://github.com/matter-labs/zksync-era/issues/2246)) ([2f6cd41](https://github.com/matter-labs/zksync-era/commit/2f6cd41642d9c2680f17e5c1adf22ad8e1b0288a)) +* **prover_cli:** Remove outdated fix for circuit id in node wg ([#2248](https://github.com/matter-labs/zksync-era/issues/2248)) ([db8e71b](https://github.com/matter-labs/zksync-era/commit/db8e71b55393b3d0e419886b62712b61305ac030)) +* **prover:** Disallow state changes from successful ([#2233](https://github.com/matter-labs/zksync-era/issues/2233)) ([2488a76](https://github.com/matter-labs/zksync-era/commit/2488a767a362ea3b40a348ae9822bed77d4b8de9)) +* **pruning:** Check pruning in metadata calculator ([#2286](https://github.com/matter-labs/zksync-era/issues/2286)) ([7bd8f27](https://github.com/matter-labs/zksync-era/commit/7bd8f27e5171f37da3aa1d6c6abb06b9a291fbbf)) +* Treat 502s and 503s as transient for GCS OS ([#2202](https://github.com/matter-labs/zksync-era/issues/2202)) ([0a12c52](https://github.com/matter-labs/zksync-era/commit/0a12c5224b0b6b6d937311e6d6d81c26b03b1d9d)) +* **vm-runner:** add config value for the first processed batch ([#2158](https://github.com/matter-labs/zksync-era/issues/2158)) ([f666717](https://github.com/matter-labs/zksync-era/commit/f666717e01beb90ff878d1cdf060284b27faf680)) +* **vm-runner:** make `last_ready_batch` account for `first_processed_batch` ([#2238](https://github.com/matter-labs/zksync-era/issues/2238))
([3889794](https://github.com/matter-labs/zksync-era/commit/38897947439db539920d97f2318b2133ddc40284)) +* **vm:** fix insertion to `decommitted_code_hashes` ([#2275](https://github.com/matter-labs/zksync-era/issues/2275)) ([15bb71e](https://github.com/matter-labs/zksync-era/commit/15bb71ec3ee66796e62cb7e61dec6e496e1f2774)) +* **vm:** Update `decommitted_code_hashes` in `prepare_to_decommit` ([#2253](https://github.com/matter-labs/zksync-era/issues/2253)) ([6c49a50](https://github.com/matter-labs/zksync-era/commit/6c49a50eb4374a06143e5bac130d0e0e74347597)) + + +### Performance Improvements + +* **db:** Improve storage switching for state keeper cache ([#2234](https://github.com/matter-labs/zksync-era/issues/2234)) ([7c8e24c](https://github.com/matter-labs/zksync-era/commit/7c8e24ce7d6e6d47359d5ae4ab1db4ddbd3e9441)) +* **db:** Try yet another storage log pruning approach ([#2268](https://github.com/matter-labs/zksync-era/issues/2268)) ([3ee34be](https://github.com/matter-labs/zksync-era/commit/3ee34be7e48fb4b7c5030a6422a0a9f8a8ebc35b)) +* **en:** Parallelize persistence and chunk processing during tree recovery ([#2050](https://github.com/matter-labs/zksync-era/issues/2050)) ([b08a667](https://github.com/matter-labs/zksync-era/commit/b08a667c819f8b3d222c237fc4447be6b75d334e)) +* **pruning:** Use more efficient query to delete past storage logs ([#2179](https://github.com/matter-labs/zksync-era/issues/2179)) ([4c18755](https://github.com/matter-labs/zksync-era/commit/4c18755876a42ee81840cadb365b3040194d0ae3)) + + +### Reverts + +* **pruning:** Revert pruning query ([#2220](https://github.com/matter-labs/zksync-era/issues/2220)) ([8427cdd](https://github.com/matter-labs/zksync-era/commit/8427cddcbd5ba13388e5b96fb988128f8dabe0f4)) +* verification of L1Batch witness (BFT-471) ([#2230](https://github.com/matter-labs/zksync-era/issues/2230)) ([227e101](https://github.com/matter-labs/zksync-era/commit/227e10180396fbb54a2e99cab775f13bc93745f3)) + ## [24.7.0](https://github.com/matter-labs/zksync-era/compare/core-v24.6.0...core-v24.7.0) (2024-06-03) diff --git a/core/bin/block_reverter/Cargo.toml b/core/bin/block_reverter/Cargo.toml index 5f32f68acbd8..9ac7a49335c4 100644 --- a/core/bin/block_reverter/Cargo.toml +++ b/core/bin/block_reverter/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "block_reverter" +description = "Utility to revert blocks" version.workspace = true edition.workspace = true authors.workspace = true @@ -12,12 +13,14 @@ publish = false [dependencies] zksync_config.workspace = true +zksync_core_leftovers.workspace = true zksync_env_config.workspace = true zksync_dal.workspace = true +zksync_protobuf_config.workspace = true zksync_object_store.workspace = true zksync_types.workspace = true zksync_block_reverter.workspace = true -vlog.workspace = true +zksync_vlog.workspace = true anyhow.workspace = true clap = { workspace = true, features = ["derive"] } diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs index 8d1198627a83..513de522aa40 100644 --- a/core/bin/block_reverter/src/main.rs +++ b/core/bin/block_reverter/src/main.rs @@ -1,4 +1,4 @@ -use std::env; +use std::path::PathBuf; use anyhow::Context as _; use clap::{Parser, Subcommand}; @@ -11,9 +11,13 @@ use zksync_block_reverter::{ BlockReverter, BlockReverterEthConfig, NodeRole, }; use zksync_config::{ - configs::{chain::NetworkConfig, DatabaseSecrets, L1Secrets, ObservabilityConfig}, - ContractsConfig, DBConfig, EthConfig, PostgresConfig, + configs::{ + chain::NetworkConfig, wallets::Wallets, 
DatabaseSecrets, GeneralConfig, L1Secrets, + ObservabilityConfig, + }, + ContractsConfig, DBConfig, EthConfig, GenesisConfig, PostgresConfig, }; +use zksync_core_leftovers::temp_config_store::decode_yaml_repr; use zksync_dal::{ConnectionPool, Core}; use zksync_env_config::{object_store::SnapshotsObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; @@ -24,6 +28,21 @@ use zksync_types::{Address, L1BatchNumber}; struct Cli { #[command(subcommand)] command: Command, + /// Path to yaml config. If set, it will be used instead of env vars + #[arg(long, global = true)] + config_path: Option<PathBuf>, + /// Path to yaml contracts config. If set, it will be used instead of env vars + #[arg(long, global = true)] + contracts_config_path: Option<PathBuf>, + /// Path to yaml secrets config. If set, it will be used instead of env vars + #[arg(long, global = true)] + secrets_path: Option<PathBuf>, + /// Path to yaml wallets config. If set, it will be used instead of env vars + #[arg(long, global = true)] + wallets_path: Option<PathBuf>, + /// Path to yaml genesis config. If set, it will be used instead of env vars + #[arg(long, global = true)] + genesis_path: Option<PathBuf>, } #[derive(Debug, Subcommand)] enum Command { @@ -84,15 +103,17 @@ enum Command { #[tokio::main] async fn main() -> anyhow::Result<()> { - let command = Cli::parse().command; + let opts = Cli::parse(); let observability_config = ObservabilityConfig::from_env().context("ObservabilityConfig::from_env()")?; - let log_format: vlog::LogFormat = observability_config + let log_format: zksync_vlog::LogFormat = observability_config .log_format .parse() .context("Invalid log format")?; - let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + let mut builder = zksync_vlog::ObservabilityBuilder::new() + .with_log_format(log_format) + .disable_default_logs(); // It's a CLI application, so we only need to show logs that were actually requested.
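
A minimal sketch of how the `zksync_vlog` builder calls above compose in a standalone CLI. It uses only calls that appear in this PR (`with_log_format`, `disable_default_logs`, `with_log_directives`, `build`); sourcing directives from `RUST_LOG` is an illustrative assumption, not how the binaries here are wired:

```rust
use anyhow::Context as _;

fn main() -> anyhow::Result<()> {
    // "plain" or "json", mirroring `ObservabilityConfig::log_format`.
    let log_format: zksync_vlog::LogFormat = "plain".parse().context("Invalid log format")?;
    let mut builder = zksync_vlog::ObservabilityBuilder::new()
        .with_log_format(log_format)
        // A CLI should only emit logs that were explicitly requested.
        .disable_default_logs();
    // Directives use the `RUST_LOG` syntax (cf. the `log_directives` option
    // added to the EN observability config later in this diff).
    if let Ok(directives) = std::env::var("RUST_LOG") {
        builder = builder.with_log_directives(directives);
    }
    // Keep the guard alive until exit so buffered logs are flushed.
    let _guard = builder.build();
    Ok(())
}
```
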
if let Some(sentry_url) = observability_config.sentry_url { builder = builder .with_sentry_url(&sentry_url) @@ -101,35 +122,111 @@ async fn main() -> anyhow::Result<()> { } let _guard = builder.build(); - let eth_sender = EthConfig::from_env().context("EthConfig::from_env()")?; - let db_config = DBConfig::from_env().context("DBConfig::from_env()")?; + let general_config: Option<GeneralConfig> = if let Some(path) = opts.config_path { + let yaml = std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; + let config = + decode_yaml_repr::<zksync_protobuf_config::proto::general::GeneralConfig>(&yaml) + .context("failed decoding general YAML config")?; + Some(config) + } else { + None + }; + let wallets_config: Option<Wallets> = if let Some(path) = opts.wallets_path { + let yaml = std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; + let config = decode_yaml_repr::<zksync_protobuf_config::proto::wallets::Wallets>(&yaml) + .context("failed decoding wallets YAML config")?; + Some(config) + } else { + None + }; + let genesis_config: Option<GenesisConfig> = if let Some(path) = opts.genesis_path { + let yaml = std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; + let config = decode_yaml_repr::<zksync_protobuf_config::proto::genesis::Genesis>(&yaml) + .context("failed decoding genesis YAML config")?; + Some(config) + } else { + None + }; + + let eth_sender = match &general_config { + Some(general_config) => general_config + .eth + .clone() + .context("Failed to find eth config")?, + None => EthConfig::from_env().context("EthConfig::from_env()")?, + }; + let db_config = match &general_config { + Some(general_config) => general_config + .db_config + .clone() + .context("Failed to find db config")?, + None => DBConfig::from_env().context("DBConfig::from_env()")?, + }; + let contracts = match opts.contracts_config_path { + Some(path) => { + let yaml = + std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; + decode_yaml_repr::<zksync_protobuf_config::proto::contracts::Contracts>(&yaml) + .context("failed decoding contracts YAML config")? + } + None => ContractsConfig::from_env().context("ContractsConfig::from_env()")?, + }; + let secrets_config = if let Some(path) = opts.secrets_path { + let yaml = std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; + let config = decode_yaml_repr::<zksync_protobuf_config::proto::secrets::Secrets>(&yaml) + .context("failed decoding secrets YAML config")?; + Some(config) + } else { + None + }; + + let default_priority_fee_per_gas = eth_sender .gas_adjuster .context("gas_adjuster")? .default_priority_fee_per_gas; - let contracts = ContractsConfig::from_env().context("ContractsConfig::from_env()")?; - let network = NetworkConfig::from_env().context("NetworkConfig::from_env()")?; - let database_secrets = DatabaseSecrets::from_env().context("DatabaseSecrets::from_env()")?; - let l1_secrets = L1Secrets::from_env().context("L1Secrets::from_env()")?; - let postgress_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?; - let era_chain_id = env::var("CONTRACTS_ERA_CHAIN_ID") .context("`CONTRACTS_ERA_CHAIN_ID` env variable is not set")?
- .parse() - .map_err(|err| { - anyhow::anyhow!("failed parsing `CONTRACTS_ERA_CHAIN_ID` env variable: {err}") - })?; - let config = BlockReverterEthConfig::new(&eth_sender, &contracts, &network, era_chain_id)?; + + let database_secrets = match &secrets_config { + Some(secrets_config) => secrets_config + .database + .clone() + .context("Failed to find database config")?, + None => DatabaseSecrets::from_env().context("DatabaseSecrets::from_env()")?, + }; + let l1_secrets = match &secrets_config { + Some(secrets_config) => secrets_config + .l1 + .clone() + .context("Failed to find l1 config")?, + None => L1Secrets::from_env().context("L1Secrets::from_env()")?, + }; + let postgres_config = match &general_config { + Some(general_config) => general_config + .postgres_config + .clone() + .context("Failed to find postgres config")?, + None => PostgresConfig::from_env().context("PostgresConfig::from_env()")?, + }; + let zksync_network_id = match &genesis_config { + Some(genesis_config) => genesis_config.l2_chain_id, + None => { + NetworkConfig::from_env() + .context("NetworkConfig::from_env()")? + .zksync_network_id + } + }; + + let config = BlockReverterEthConfig::new(&eth_sender, &contracts, zksync_network_id)?; let connection_pool = ConnectionPool::<Core>::builder( database_secrets.master_url()?, - postgress_config.max_connections()?, + postgres_config.max_connections()?, ) .build() .await .context("failed to build a connection pool")?; let mut block_reverter = BlockReverter::new(NodeRole::Main, connection_pool); - match command { + match opts.command { Command::Display { json, operator_address, @@ -155,13 +252,22 @@ async fn main() -> anyhow::Result<()> { let eth_client = Client::http(l1_secrets.l1_rpc_url.clone()) .context("Ethereum client")? .build(); - #[allow(deprecated)] - let reverter_private_key = eth_sender - .sender - .context("eth_sender_config")? - .private_key() - .context("eth_sender_config.private_key")? - .context("eth_sender_config.private_key is not set")?; + let reverter_private_key = if let Some(wallets_config) = wallets_config { + wallets_config + .eth_sender + .unwrap() + .operator + .private_key() + .to_owned() + } else { + #[allow(deprecated)] + eth_sender + .sender + .context("eth_sender_config")? + .private_key() + .context("eth_sender_config.private_key")? + .context("eth_sender_config.private_key is not set")?
+ }; let priority_fee_per_gas = priority_fee_per_gas.unwrap_or(default_priority_fee_per_gas); let l1_chain_id = eth_client diff --git a/core/bin/contract-verifier/Cargo.toml b/core/bin/contract-verifier/Cargo.toml index 3e9832f995f9..d57b44f046cc 100644 --- a/core/bin/contract-verifier/Cargo.toml +++ b/core/bin/contract-verifier/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_contract_verifier" -description = "The zkEVM contract verifier" +description = "The ZKsync contract verifier" version.workspace = true edition.workspace = true authors.workspace = true @@ -17,8 +17,8 @@ zksync_config.workspace = true zksync_contract_verifier_lib.workspace = true zksync_queued_job_processor.workspace = true zksync_utils.workspace = true -prometheus_exporter.workspace = true -vlog.workspace = true +zksync_vlog.workspace = true +zksync_core_leftovers.workspace = true anyhow.workspace = true tokio = { workspace = true, features = ["full"] } diff --git a/core/bin/contract-verifier/src/main.rs b/core/bin/contract-verifier/src/main.rs index 118e7f41be97..fe33a34a7583 100644 --- a/core/bin/contract-verifier/src/main.rs +++ b/core/bin/contract-verifier/src/main.rs @@ -1,19 +1,16 @@ use std::{cell::RefCell, time::Duration}; -use anyhow::Context as _; +use anyhow::Context; use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt}; -use prometheus_exporter::PrometheusExporterConfig; use structopt::StructOpt; use tokio::sync::watch; -use zksync_config::{ - configs::{ObservabilityConfig, PrometheusConfig}, - ApiConfig, ContractVerifierConfig, -}; +use zksync_config::configs::PrometheusConfig; use zksync_contract_verifier_lib::ContractVerifier; +use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config}; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_env_config::FromEnv; use zksync_queued_job_processor::JobProcessor; use zksync_utils::{wait_for_tasks::ManagedTasks, workspace_dir_or_current_dir}; +use zksync_vlog::prometheus::PrometheusExporterConfig; async fn update_compiler_versions(connection_pool: &ConnectionPool<Core>) { let mut storage = connection_pool.connection().await.unwrap(); @@ -109,26 +106,34 @@ async fn update_compiler_versions(connection_pool: &ConnectionPool<Core>) { transaction.commit().await.unwrap(); } -use zksync_config::configs::DatabaseSecrets; - #[derive(StructOpt)] #[structopt(name = "ZKsync contract code verifier", author = "Matter Labs")] struct Opt { /// Number of jobs to process. If None, runs indefinitely. #[structopt(long)] jobs_number: Option<usize>, + /// Path to the configuration file. + #[structopt(long)] + config_path: Option<std::path::PathBuf>, + /// Path to the secrets file.
+ #[structopt(long)] + secrets_path: Option<std::path::PathBuf>, } #[tokio::main] async fn main() -> anyhow::Result<()> { let opt = Opt::from_args(); - let verifier_config = ContractVerifierConfig::from_env().context("ContractVerifierConfig")?; + let general_config = load_general_config(opt.config_path).context("general config")?; + let database_secrets = load_database_secrets(opt.secrets_path).context("database secrets")?; + + let verifier_config = general_config + .contract_verifier + .context("ContractVerifierConfig")?; let prometheus_config = PrometheusConfig { listener_port: verifier_config.prometheus_port, - ..ApiConfig::from_env().context("ApiConfig")?.prometheus + ..general_config.api_config.context("ApiConfig")?.prometheus }; - let database_secrets = DatabaseSecrets::from_env().context("DatabaseSecrets")?; let pool = ConnectionPool::<Core>::singleton( database_secrets .master_url() @@ -138,13 +143,14 @@ async fn main() -> anyhow::Result<()> { .await .unwrap(); - let observability_config = - ObservabilityConfig::from_env().context("ObservabilityConfig::from_env()")?; - let log_format: vlog::LogFormat = observability_config + let observability_config = general_config + .observability + .context("ObservabilityConfig")?; + let log_format: zksync_vlog::LogFormat = observability_config .log_format .parse() .context("Invalid log format")?; - let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(log_format); if let Some(sentry_url) = &observability_config.sentry_url { builder = builder .with_sentry_url(sentry_url) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index ee6aa08be9da..c3e8a4bb18e2 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_external_node" -version = "24.7.0" # x-release-please-version +description = "Non-validator ZKsync node" +version = "24.11.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true @@ -25,7 +26,6 @@ zksync_contracts.workspace = true zksync_l1_contract_interface.workspace = true zksync_snapshots_applier.workspace = true zksync_object_store.workspace = true -prometheus_exporter.workspace = true zksync_health_check.workspace = true zksync_web3_decl.workspace = true zksync_types.workspace = true @@ -43,12 +43,14 @@ zksync_node_sync.workspace = true zksync_node_api_server.workspace = true zksync_node_consensus.workspace = true zksync_node_framework.workspace = true -vlog.workspace = true +zksync_vlog.workspace = true zksync_concurrency.workspace = true zksync_consensus_roles.workspace = true +zksync_consensus_crypto.workspace = true vise.workspace = true +async-trait.workspace = true anyhow.workspace = true tokio = { workspace = true, features = ["full"] } futures.workspace = true diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 9cd6a758a25c..5674396652f4 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -2,6 +2,7 @@ use std::{ env, ffi::OsString, num::{NonZeroU32, NonZeroU64, NonZeroUsize}, + path::PathBuf, time::Duration, }; @@ -11,10 +12,14 @@ use zksync_config::{ configs::{ api::{MaxResponseSize, MaxResponseSizeOverrides}, consensus::{ConsensusConfig, ConsensusSecrets}, + en_config::ENConfig, + GeneralConfig, Secrets, }, ObjectStoreConfig, }; -use zksync_core_leftovers::temp_config_store::decode_yaml_repr; +use
zksync_consensus_crypto::TextFmt; +use zksync_consensus_roles as roles; +use zksync_core_leftovers::temp_config_store::{decode_yaml_repr, read_yaml_repr}; #[cfg(test)] use zksync_dal::{ConnectionPool, Core}; use zksync_metadata_calculator::MetadataCalculatorRecoveryConfig; @@ -41,6 +46,32 @@ pub(crate) mod observability; #[cfg(test)] mod tests; +macro_rules! load_optional_config_or_default { + ($config:expr, $($name:ident).+, $default:ident) => { + $config + .as_ref() + .map(|a| a.$($name).+.map(|a| a.try_into())).flatten().transpose()? + .unwrap_or_else(Self::$default) + }; +} + +macro_rules! load_config_or_default { + ($config:expr, $($name:ident).+, $default:ident) => { + $config + .as_ref() + .map(|a| a.$($name).+.clone().try_into()).transpose()? + .unwrap_or_else(Self::$default) + }; +} + +macro_rules! load_config { + ($config:expr, $($name:ident).+) => { + $config + .as_ref() + .map(|a| a.$($name).+.clone().map(|a| a.try_into())).flatten().transpose()? + }; +} + const BYTES_IN_MEGABYTE: usize = 1_024 * 1_024; /// Encapsulation of configuration source with a mock implementation used in tests. @@ -392,6 +423,9 @@ pub(crate) struct OptionalENConfig { #[serde(default = "OptionalENConfig::default_snapshots_recovery_postgres_max_concurrency")] pub snapshots_recovery_postgres_max_concurrency: NonZeroUsize, + #[serde(default)] + pub snapshots_recovery_object_store: Option<ObjectStoreConfig>, + /// Enables pruning of the historical node state (Postgres and Merkle tree). The node will retain /// recent state and will continuously remove (prune) old enough parts of the state in the background. #[serde(default)] @@ -407,12 +441,236 @@ /// may be temporarily retained for other reasons; e.g., a batch cannot be pruned until it is executed on L1, /// which happens roughly 24 hours after its generation on the mainnet. Thus, in practice this value can specify /// the retention period greater than that implicitly imposed by other criteria (e.g., 7 or 30 days). - /// If set to 0, L1 batches will not be retained based on their timestamp. The default value is 1 hour. + /// If set to 0, L1 batches will not be retained based on their timestamp. The default value is 7 days.
#[serde(default = "OptionalENConfig::default_pruning_data_retention_sec")] pruning_data_retention_sec: u64, } impl OptionalENConfig { + fn from_configs(general_config: &GeneralConfig, enconfig: &ENConfig) -> anyhow::Result<Self> { + let api_namespaces = load_config!(general_config.api_config, web3_json_rpc.api_namespaces) + .map(|a: Vec<String>| a.iter().map(|a| a.parse()).collect::<Result<Vec<_>, _>>()) + .transpose()?; + + Ok(OptionalENConfig { + filters_limit: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.filters_limit, + default_filters_limit + ), + subscriptions_limit: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.subscriptions_limit, + default_subscriptions_limit + ), + req_entities_limit: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.req_entities_limit, + default_req_entities_limit + ), + max_tx_size_bytes: load_config_or_default!( + general_config.api_config, + web3_json_rpc.max_tx_size, + default_max_tx_size_bytes + ), + vm_execution_cache_misses_limit: load_config!( + general_config.api_config, + web3_json_rpc.vm_execution_cache_misses_limit + ), + fee_history_limit: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.fee_history_limit, + default_fee_history_limit + ), + max_batch_request_size: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.max_batch_request_size, + default_max_batch_request_size + ), + max_response_body_size_mb: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.max_response_body_size_mb, + default_max_response_body_size_mb + ), + max_response_body_size_overrides_mb: load_config_or_default!( + general_config.api_config, + web3_json_rpc.max_response_body_size_overrides_mb, + default_max_response_body_size_overrides_mb + ), + pubsub_polling_interval_ms: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.pubsub_polling_interval, + default_polling_interval + ), + max_nonce_ahead: load_config_or_default!( + general_config.api_config, + web3_json_rpc.max_nonce_ahead, + default_max_nonce_ahead + ), + vm_concurrency_limit: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.vm_concurrency_limit, + default_vm_concurrency_limit + ), + factory_deps_cache_size_mb: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.factory_deps_cache_size_mb, + default_factory_deps_cache_size_mb + ), + initial_writes_cache_size_mb: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.initial_writes_cache_size_mb, + default_initial_writes_cache_size_mb + ), + latest_values_cache_size_mb: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.latest_values_cache_size_mb, + default_latest_values_cache_size_mb + ), + filters_disabled: general_config + .api_config + .as_ref() + .map(|a| a.web3_json_rpc.filters_disabled) + .unwrap_or_default(), + mempool_cache_update_interval_ms: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.mempool_cache_update_interval, + default_mempool_cache_update_interval_ms + ), + mempool_cache_size: load_optional_config_or_default!( + general_config.api_config, + web3_json_rpc.mempool_cache_size, + default_mempool_cache_size + ), + + healthcheck_slow_time_limit_ms: load_config!( + general_config.api_config, + healthcheck.slow_time_limit_ms + ), + healthcheck_hard_time_limit_ms: load_config!( + general_config.api_config, + healthcheck.hard_time_limit_ms + ),
estimate_gas_scale_factor: load_config_or_default!( + general_config.api_config, + web3_json_rpc.estimate_gas_scale_factor, + default_estimate_gas_scale_factor + ), + estimate_gas_acceptable_overestimation: load_config_or_default!( + general_config.api_config, + web3_json_rpc.estimate_gas_acceptable_overestimation, + default_estimate_gas_acceptable_overestimation + ), + gas_price_scale_factor: load_config_or_default!( + general_config.api_config, + web3_json_rpc.gas_price_scale_factor, + default_gas_price_scale_factor + ), + merkle_tree_max_l1_batches_per_iter: load_config_or_default!( + general_config.db_config, + merkle_tree.max_l1_batches_per_iter, + default_merkle_tree_max_l1_batches_per_iter + ), + merkle_tree_max_open_files: load_config!( + general_config.db_config, + experimental.state_keeper_db_max_open_files + ), + merkle_tree_multi_get_chunk_size: load_config_or_default!( + general_config.db_config, + merkle_tree.multi_get_chunk_size, + default_merkle_tree_multi_get_chunk_size + ), + merkle_tree_block_cache_size_mb: load_config_or_default!( + general_config.db_config, + merkle_tree.block_cache_size_mb, + default_merkle_tree_block_cache_size_mb + ), + merkle_tree_memtable_capacity_mb: load_config_or_default!( + general_config.db_config, + merkle_tree.memtable_capacity_mb, + default_merkle_tree_memtable_capacity_mb + ), + merkle_tree_stalled_writes_timeout_sec: load_config_or_default!( + general_config.db_config, + merkle_tree.stalled_writes_timeout_sec, + default_merkle_tree_stalled_writes_timeout_sec + ), + database_long_connection_threshold_ms: load_config!( + general_config.postgres_config, + long_connection_threshold_ms + ), + database_slow_query_threshold_ms: load_config!( + general_config.postgres_config, + slow_query_threshold_ms + ), + l2_block_seal_queue_capacity: load_config_or_default!( + general_config.state_keeper_config, + l2_block_seal_queue_capacity, + default_l2_block_seal_queue_capacity + ), + l1_batch_commit_data_generator_mode: enconfig.l1_batch_commit_data_generator_mode, + snapshots_recovery_enabled: general_config + .snapshot_recovery + .as_ref() + .map(|a| a.enabled) + .unwrap_or_default(), + snapshots_recovery_postgres_max_concurrency: load_optional_config_or_default!( + general_config.snapshot_recovery, + postgres.max_concurrency, + default_snapshots_recovery_postgres_max_concurrency + ), + pruning_enabled: general_config + .pruning + .as_ref() + .map(|a| a.enabled) + .unwrap_or_default(), + snapshots_recovery_object_store: load_config!( + general_config.snapshot_recovery, + object_store + ), + pruning_chunk_size: load_optional_config_or_default!( + general_config.pruning, + chunk_size, + default_pruning_chunk_size + ), + pruning_removal_delay_sec: load_optional_config_or_default!( + general_config.pruning, + removal_delay_sec, + default_pruning_removal_delay_sec + ), + pruning_data_retention_sec: load_optional_config_or_default!( + general_config.pruning, + data_retention_sec, + default_pruning_data_retention_sec + ), + protective_reads_persistence_enabled: general_config + .db_config + .as_ref() + .map(|a| a.experimental.protective_reads_persistence_enabled) + .unwrap_or(true), + merkle_tree_processing_delay_ms: load_config_or_default!( + general_config.db_config, + experimental.processing_delay_ms, + default_merkle_tree_processing_delay_ms + ), + merkle_tree_include_indices_and_filters_in_block_cache: general_config + .db_config + .as_ref() + .map(|a| a.experimental.include_indices_and_filters_in_block_cache) + .unwrap_or_default(), + 
extended_rpc_tracing: load_config_or_default!( + general_config.api_config, + web3_json_rpc.extended_api_tracing, + default_extended_api_tracing + ), + main_node_rate_limit_rps: enconfig + .main_node_rate_limit_rps + .unwrap_or_else(Self::default_main_node_rate_limit_rps), + api_namespaces, + contracts_diamond_proxy_addr: None, + }) + } + const fn default_filters_limit() -> usize { 10_000 } @@ -504,6 +762,10 @@ impl OptionalENConfig { 10 } + fn default_max_response_body_size_overrides_mb() -> MaxResponseSizeOverrides { + MaxResponseSizeOverrides::empty() + } + const fn default_l2_block_seal_queue_capacity() -> usize { 10 } @@ -541,13 +803,15 @@ impl OptionalENConfig { } fn default_pruning_data_retention_sec() -> u64 { - 3_600 // 1 hour + 3_600 * 24 * 7 // 7 days } fn from_env() -> anyhow::Result<Self> { - envy::prefixed("EN_") + let mut result: OptionalENConfig = envy::prefixed("EN_") .from_env() - .context("could not load external node config") + .context("could not load external node config")?; + result.snapshots_recovery_object_store = snapshot_recovery_object_store_config().ok(); + Ok(result) } pub fn polling_interval(&self) -> Duration { @@ -674,6 +938,37 @@ impl RequiredENConfig { .context("could not load external node config") } + fn from_configs( + general: &GeneralConfig, + en_config: &ENConfig, + secrets: &Secrets, + ) -> anyhow::Result<Self> { + let api_config = general + .api_config + .as_ref() + .context("Api config is required")?; + let db_config = general + .db_config + .as_ref() + .context("Database config is required")?; + Ok(RequiredENConfig { + l1_chain_id: en_config.l1_chain_id, + l2_chain_id: en_config.l2_chain_id, + http_port: api_config.web3_json_rpc.http_port, + ws_port: api_config.web3_json_rpc.ws_port, + healthcheck_port: api_config.healthcheck.port, + eth_client_url: secrets + .l1 + .as_ref() + .context("L1 secrets are required")? + .l1_rpc_url + .clone(), + main_node_url: en_config.main_node_url.clone(), + state_cache_path: db_config.state_keeper_db_path.clone(), + merkle_tree_path: db_config.merkle_tree.path.clone(), + }) + } + #[cfg(test)] fn mock(temp_dir: &tempfile::TempDir) -> Self { Self { @@ -748,6 +1043,10 @@ pub(crate) struct ExperimentalENConfig { // Snapshot recovery /// L1 batch number of the snapshot to use during recovery. Specifying this parameter is mostly useful for testing. pub snapshots_recovery_l1_batch: Option<L1BatchNumber>, + /// Enables dropping storage key preimages when recovering storage logs from a snapshot with version 0. + /// This is a temporary flag that will eventually be removed together with version 0 snapshot support. + #[serde(default)] + pub snapshots_recovery_drop_storage_key_preimages: bool, /// Approximate chunk size (measured in the number of entries) to recover in a single iteration. /// Reasonable values are order of 100,000 (meaning an iteration takes several seconds).
/// @@ -784,6 +1083,7 @@ impl ExperimentalENConfig { Self::default_state_keeper_db_block_cache_capacity_mb(), state_keeper_db_max_open_files: None, snapshots_recovery_l1_batch: None, + snapshots_recovery_drop_storage_key_preimages: false, snapshots_recovery_tree_chunk_size: Self::default_snapshots_recovery_tree_chunk_size(), snapshots_recovery_tree_parallel_persistence_buffer: None, commitment_generator_max_parallelism: None, @@ -794,6 +1094,53 @@ impl ExperimentalENConfig { pub fn state_keeper_db_block_cache_capacity(&self) -> usize { self.state_keeper_db_block_cache_capacity_mb * BYTES_IN_MEGABYTE } + + pub fn from_configs(general_config: &GeneralConfig) -> anyhow::Result<Self> { + Ok(Self { + state_keeper_db_block_cache_capacity_mb: load_config_or_default!( + general_config.db_config, + experimental.state_keeper_db_block_cache_capacity_mb, + default_state_keeper_db_block_cache_capacity_mb + ), + state_keeper_db_max_open_files: load_config!( + general_config.db_config, + experimental.state_keeper_db_max_open_files + ), + snapshots_recovery_l1_batch: load_config!(general_config.snapshot_recovery, l1_batch), + snapshots_recovery_tree_chunk_size: load_optional_config_or_default!( + general_config.snapshot_recovery, + tree.chunk_size, + default_snapshots_recovery_tree_chunk_size + ), + snapshots_recovery_tree_parallel_persistence_buffer: load_config!( + general_config.snapshot_recovery, + tree.parallel_persistence_buffer + ), + snapshots_recovery_drop_storage_key_preimages: general_config + .snapshot_recovery + .as_ref() + .map_or(false, |config| config.drop_storage_key_preimages), + commitment_generator_max_parallelism: general_config + .commitment_generator + .as_ref() + .map(|a| a.max_parallelism), + }) + } +} + +/// Generates all possible consensus secrets (from system entropy) +/// and prints them to stdout. +/// They should be copied over to the secrets.yaml/consensus_secrets.yaml file. +pub fn generate_consensus_secrets() { + let validator_key = roles::validator::SecretKey::generate(); + let attester_key = roles::attester::SecretKey::generate(); + let node_key = roles::node::SecretKey::generate(); + println!("# {}", validator_key.public().encode()); + println!("validator_key: {}", validator_key.encode()); + println!("# {}", attester_key.public().encode()); + println!("attester_key: {}", attester_key.encode()); + println!("# {}", node_key.public().encode()); + println!("node_key: {}", node_key.encode()); } pub(crate) fn read_consensus_secrets() -> anyhow::Result<Option<ConsensusSecrets>> { @@ -832,11 +1179,32 @@ pub struct ApiComponentConfig { pub tree_api_remote_url: Option<String>, } +impl ApiComponentConfig { + fn from_configs(general_config: &GeneralConfig) -> Self { + ApiComponentConfig { + tree_api_remote_url: general_config + .api_config + .as_ref() + .and_then(|a| a.web3_json_rpc.tree_api_url.clone()), + } + } +} + #[derive(Debug, Deserialize)] pub struct TreeComponentConfig { pub api_port: Option<u16>, } +impl TreeComponentConfig { + fn from_configs(general_config: &GeneralConfig) -> Self { + let api_port = general_config + .api_config + .as_ref() + .map(|a| a.merkle_tree.port); + TreeComponentConfig { api_port } + } +} + /// External Node Config contains all the configuration required for the EN operation. /// It is split into three parts: required, optional and remote for easier navigation.
 #[derive(Debug)]
@@ -874,6 +1242,64 @@ impl ExternalNodeConfig<()> {
         })
     }
 
+    pub fn from_files(
+        general_config_path: PathBuf,
+        external_node_config_path: PathBuf,
+        secrets_configs_path: PathBuf,
+        consensus_config_path: Option<PathBuf>,
+    ) -> anyhow::Result<Self> {
+        let general_config = read_yaml_repr::(general_config_path)
+            .context("failed decoding general YAML config")?;
+        let external_node_config =
+            read_yaml_repr::(external_node_config_path)
+                .context("failed decoding external node YAML config")?;
+        let secrets_config = read_yaml_repr::(secrets_configs_path)
+            .context("failed decoding secrets YAML config")?;
+
+        let consensus = consensus_config_path
+            .map(read_yaml_repr::)
+            .transpose()
+            .context("failed decoding consensus YAML config")?;
+
+        let required = RequiredENConfig::from_configs(
+            &general_config,
+            &external_node_config,
+            &secrets_config,
+        )?;
+        let optional = OptionalENConfig::from_configs(&general_config, &external_node_config)?;
+        let postgres = PostgresConfig {
+            database_url: secrets_config
+                .database
+                .as_ref()
+                .context("DB secrets are required")?
+                .server_url
+                .clone()
+                .context("Server URL is required")?,
+            max_connections: general_config
+                .postgres_config
+                .as_ref()
+                .context("Postgres config is required")?
+                .max_connections()?,
+        };
+        let observability = ObservabilityENConfig::from_configs(&general_config)?;
+        let experimental = ExperimentalENConfig::from_configs(&general_config)?;
+
+        let api_component = ApiComponentConfig::from_configs(&general_config);
+        let tree_component = TreeComponentConfig::from_configs(&general_config);
+
+        Ok(Self {
+            required,
+            postgres,
+            optional,
+            observability,
+            experimental,
+            consensus,
+            api_component,
+            tree_component,
+            remote: (),
+        })
+    }
+
     /// Fetches contracts addresses from the main node, completing the configuration.
     pub async fn fetch_remote(
         self,
diff --git a/core/bin/external_node/src/config/observability.rs b/core/bin/external_node/src/config/observability.rs
index a571b071b5e2..4cd4efe0df04 100644
--- a/core/bin/external_node/src/config/observability.rs
+++ b/core/bin/external_node/src/config/observability.rs
@@ -1,9 +1,9 @@
 use std::{collections::HashMap, time::Duration};
 
 use anyhow::Context as _;
-use prometheus_exporter::PrometheusExporterConfig;
 use serde::Deserialize;
-use vlog::LogFormat;
+use zksync_config::configs::GeneralConfig;
+use zksync_vlog::{prometheus::PrometheusExporterConfig, LogFormat};
 
 use super::{ConfigurationSource, Environment};
 
@@ -26,6 +26,8 @@ pub(crate) struct ObservabilityENConfig {
     /// Log format to use: either `plain` (default) or `json`.
     #[serde(default)]
     pub log_format: LogFormat,
+    // Log directives in the format used in `RUST_LOG`
+    pub log_directives: Option<String>,
 }
 
 impl ObservabilityENConfig {
@@ -78,8 +80,11 @@
         }
     }
 
-    pub fn build_observability(&self) -> anyhow::Result {
-        let mut builder = vlog::ObservabilityBuilder::new().with_log_format(self.log_format);
+    pub fn build_observability(&self) -> anyhow::Result {
+        let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(self.log_format);
+        if let Some(log_directives) = self.log_directives.clone() {
+            builder = builder.with_log_directives(log_directives)
+        };
         // Some legacy deployments use `unset` as an equivalent of `None`.
let sentry_url = self.sentry_url.as_deref().filter(|&url| url != "unset"); if let Some(sentry_url) = sentry_url { @@ -98,4 +103,40 @@ impl ObservabilityENConfig { } Ok(guard) } + + pub(crate) fn from_configs(general_config: &GeneralConfig) -> anyhow::Result { + let (sentry_url, sentry_environment, log_format, log_directives) = + if let Some(observability) = general_config.observability.as_ref() { + ( + observability.sentry_url.clone(), + observability.sentry_environment.clone(), + observability + .log_format + .parse() + .context("Invalid log format")?, + observability.log_directives.clone(), + ) + } else { + (None, None, LogFormat::default(), None) + }; + let (prometheus_port, prometheus_pushgateway_url, prometheus_push_interval_ms) = + if let Some(prometheus) = general_config.prometheus_config.as_ref() { + ( + Some(prometheus.listener_port), + prometheus.pushgateway_url.clone(), + prometheus.push_interval_ms.unwrap_or_default(), + ) + } else { + (None, None, 0) + }; + Ok(Self { + prometheus_port, + prometheus_pushgateway_url, + prometheus_push_interval_ms, + sentry_url, + sentry_environment, + log_format, + log_directives, + }) + } } diff --git a/core/bin/external_node/src/config/tests.rs b/core/bin/external_node/src/config/tests.rs index 79db5a174970..1b42b98a32a4 100644 --- a/core/bin/external_node/src/config/tests.rs +++ b/core/bin/external_node/src/config/tests.rs @@ -42,12 +42,12 @@ fn parsing_observability_config() { assert_eq!(config.prometheus_port, Some(3322)); assert_eq!(config.sentry_url.unwrap(), "https://example.com/"); assert_eq!(config.sentry_environment.unwrap(), "mainnet - mainnet2"); - assert_matches!(config.log_format, vlog::LogFormat::Plain); + assert_matches!(config.log_format, zksync_vlog::LogFormat::Plain); assert_eq!(config.prometheus_push_interval_ms, 10_000); env_vars.0.insert("MISC_LOG_FORMAT", "json"); let config = ObservabilityENConfig::new(&env_vars).unwrap(); - assert_matches!(config.log_format, vlog::LogFormat::Json); + assert_matches!(config.log_format, zksync_vlog::LogFormat::Json); // If both the canonical and obsolete vars are specified, the canonical one should prevail. env_vars.0.insert("EN_LOG_FORMAT", "plain"); @@ -55,7 +55,7 @@ fn parsing_observability_config() { .0 .insert("EN_SENTRY_URL", "https://example.com/new"); let config = ObservabilityENConfig::new(&env_vars).unwrap(); - assert_matches!(config.log_format, vlog::LogFormat::Plain); + assert_matches!(config.log_format, zksync_vlog::LogFormat::Plain); assert_eq!(config.sentry_url.unwrap(), "https://example.com/new"); } diff --git a/core/bin/external_node/src/init.rs b/core/bin/external_node/src/init.rs index a9ee796194cc..a56e51953899 100644 --- a/core/bin/external_node/src/init.rs +++ b/core/bin/external_node/src/init.rs @@ -3,6 +3,8 @@ use std::time::Instant; use anyhow::Context as _; +use tokio::sync::watch; +use zksync_config::ObjectStoreConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_health_check::AppHealthCheck; use zksync_node_sync::genesis::perform_genesis_if_needed; @@ -12,12 +14,12 @@ use zksync_snapshots_applier::{SnapshotsApplierConfig, SnapshotsApplierTask}; use zksync_types::{L1BatchNumber, L2ChainId}; use zksync_web3_decl::client::{DynClient, L2}; -use crate::config::snapshot_recovery_object_store_config; - #[derive(Debug)] pub(crate) struct SnapshotRecoveryConfig { /// If not specified, the latest snapshot will be used. 
    pub snapshot_l1_batch_override: Option<L1BatchNumber>,
+    pub drop_storage_key_preimages: bool,
+    pub object_store_config: Option<ObjectStoreConfig>,
 }
 
 #[derive(Debug)]
@@ -29,6 +31,7 @@ enum InitDecision {
 }
 
 pub(crate) async fn ensure_storage_initialized(
+    stop_receiver: watch::Receiver<bool>,
     pool: ConnectionPool<Core>,
     main_node_client: Box<DynClient<L2>>,
     app_health: &AppHealthCheck,
@@ -90,7 +93,9 @@ pub(crate) async fn ensure_storage_initialized(
         )?;
         tracing::warn!("Proceeding with snapshot recovery. This is an experimental feature; use at your own risk");
 
-        let object_store_config = snapshot_recovery_object_store_config()?;
+        let object_store_config = recovery_config.object_store_config.context(
+            "Snapshot object store must be present if snapshot recovery is enabled",
+        )?;
         let object_store = ObjectStoreFactory::new(object_store_config)
             .create_store()
             .await?;
@@ -109,11 +114,15 @@ pub(crate) async fn ensure_storage_initialized(
             );
             snapshots_applier_task.set_snapshot_l1_batch(snapshot_l1_batch);
         }
+        if recovery_config.drop_storage_key_preimages {
+            tracing::info!("Dropping storage key preimages for snapshot storage logs");
+            snapshots_applier_task.drop_storage_key_preimages();
+        }
         app_health.insert_component(snapshots_applier_task.health_check())?;
 
         let recovery_started_at = Instant::now();
         let stats = snapshots_applier_task
-            .run()
+            .run(stop_receiver)
             .await
            .context("snapshot recovery failed")?;
        if stats.done_work {
@@ -122,6 +131,10 @@
            .set(latency);
            tracing::info!("Recovered Postgres from snapshot in {latency:?}");
        }
+        assert!(
+            !stats.canceled,
+            "Snapshot recovery task cannot be canceled in the current implementation"
+        );
    }
}
Ok(())
diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs
index 0adf3ddf8cb5..f6696d733482 100644
--- a/core/bin/external_node/src/main.rs
+++ b/core/bin/external_node/src/main.rs
@@ -17,9 +17,7 @@ use zksync_config::configs::{api::MerkleTreeApiConfig, database::MerkleTreeMode}
 use zksync_consistency_checker::ConsistencyChecker;
 use zksync_core_leftovers::setup_sigint_handler;
 use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core};
-use zksync_db_connection::{
-    connection_pool::ConnectionPoolBuilder, healthcheck::ConnectionPoolHealthCheck,
-};
+use zksync_db_connection::connection_pool::ConnectionPoolBuilder;
 use zksync_health_check::{AppHealthCheck, HealthStatus, ReactiveHealthCheck};
 use zksync_metadata_calculator::{
     api_server::{TreeApiClient, TreeApiHttpClient},
@@ -56,7 +54,7 @@ use zksync_web3_decl::{
 };
 
 use crate::{
-    config::ExternalNodeConfig,
+    config::{generate_consensus_secrets, ExternalNodeConfig},
     init::{ensure_storage_initialized, SnapshotRecoveryConfig},
 };
 
@@ -105,7 +103,6 @@ async fn build_state_keeper(
         Box::new(main_node_client.for_component("external_io")),
         chain_id,
     )
-    .await
     .context("Failed initializing I/O for external node state keeper")?;
 
     Ok(ZkSyncStateKeeper::new(
@@ -139,6 +136,7 @@ async fn run_tree(
             .merkle_tree_include_indices_and_filters_in_block_cache,
         memtable_capacity: config.optional.merkle_tree_memtable_capacity(),
         stalled_writes_timeout: config.optional.merkle_tree_stalled_writes_timeout(),
+        sealed_batches_have_protective_reads: config.optional.protective_reads_persistence_enabled,
         recovery: MetadataCalculatorRecoveryConfig {
             desired_chunk_size: config.experimental.snapshots_recovery_tree_chunk_size,
             parallel_persistence_buffer: config
@@ -184,14 +182,19 @@ async fn run_tree(
     if let Some(api_config) = api_config {
         let address = (Ipv4Addr::UNSPECIFIED, api_config.port).into();
         let tree_reader = metadata_calculator.tree_reader();
-        let stop_receiver = stop_receiver.clone();
+        let mut stop_receiver = stop_receiver.clone();
         task_futures.push(tokio::spawn(async move {
-            tree_reader
-                .wait()
-                .await
-                .context("Cannot initialize tree reader")?
-                .run_api_server(address, stop_receiver)
-                .await
+            if let Some(reader) = tree_reader.wait().await {
+                reader.run_api_server(address, stop_receiver).await
+            } else {
+                // The tree was dropped before being initialized, e.g. because the node is shutting down.
+                // We don't want to treat this as an error since it could mask the real shutdown cause in logs etc.
+                tracing::warn!(
+                    "Tree was dropped before being initialized; not starting the tree API server"
+                );
+                stop_receiver.changed().await?;
+                Ok(())
+            }
         }));
     }
 
@@ -280,7 +283,7 @@ async fn run_core(
     // but we only need to wait for stop signal once, and it will be propagated to all child contexts.
     let ctx = ctx::root();
     scope::run!(&ctx, |ctx, s| async move {
-        s.spawn_bg(consensus::era::run_en(
+        s.spawn_bg(consensus::era::run_external_node(
             ctx,
             cfg,
             pool,
@@ -692,10 +695,20 @@ async fn shutdown_components(
     Ok(())
 }
 
+#[derive(Debug, Clone, clap::Subcommand)]
+enum Command {
+    /// Generates consensus secret keys to use in the secrets file.
+    /// Prints the keys to stdout; copy the relevant keys into your secrets file.
+    GenerateSecrets,
+}
+
 /// External node for ZKsync Era.
 #[derive(Debug, Parser)]
 #[command(author = "Matter Labs", version)]
 struct Cli {
+    #[command(subcommand)]
+    command: Option<Command>,
+
     /// Enables consensus-based syncing instead of the JSON-RPC-based one. This is an experimental and incomplete feature;
     /// do not use unless you know what you're doing.
     #[arg(long)]
@@ -704,10 +717,28 @@ struct Cli {
     /// Comma-separated list of components to launch.
     #[arg(long, default_value = "all")]
     components: ComponentsToRun,
-
-    /// Run the node using the node framework.
-    #[arg(long)]
-    use_node_framework: bool,
+    /// Path to the YAML config. If set, it will be used instead of env vars.
+    #[arg(
+        long,
+        requires = "secrets_path",
+        requires = "external_node_config_path"
+    )]
+    config_path: Option<PathBuf>,
+    /// Path to the YAML file with secrets. If set, it will be used instead of env vars.
+    #[arg(long, requires = "config_path", requires = "external_node_config_path")]
+    secrets_path: Option<PathBuf>,
+    /// Path to the YAML file with external-node-specific configuration. If set, it will be used instead of env vars.
+    #[arg(long, requires = "config_path", requires = "secrets_path")]
+    external_node_config_path: Option<PathBuf>,
+    /// Path to the YAML file with the consensus config. If set, it will be used instead of env vars.
+    #[arg(
+        long,
+        requires = "config_path",
+        requires = "secrets_path",
+        requires = "external_node_config_path",
+        requires = "enable_consensus"
+    )]
+    consensus_path: Option<PathBuf>,
 }
 
 #[derive(Debug, Clone, Copy, PartialEq, Hash, Eq)]
@@ -764,7 +795,32 @@ async fn main() -> anyhow::Result<()> {
     // Initial setup.
     let opt = Cli::parse();
 
-    let mut config = ExternalNodeConfig::new().context("Failed to load node configuration")?;
+    if let Some(cmd) = &opt.command {
+        match cmd {
+            Command::GenerateSecrets => generate_consensus_secrets(),
+        }
+        return Ok(());
+    }
+
+    let mut config = if let Some(config_path) = opt.config_path.clone() {
+        let secrets_path = opt.secrets_path.clone().unwrap();
+        let external_node_config_path = opt.external_node_config_path.clone().unwrap();
+        if opt.enable_consensus {
+            anyhow::ensure!(
+                opt.consensus_path.is_some(),
+                "if --config-path and --enable-consensus are specified, then --consensus-path should be used to specify the location of the consensus config"
+            );
+        }
+        ExternalNodeConfig::from_files(
+            config_path,
+            external_node_config_path,
+            secrets_path,
+            opt.consensus_path.clone(),
+        )?
+    } else {
+        ExternalNodeConfig::new().context("Failed to load node configuration")?
+    };
+
     if !opt.enable_consensus {
         config.consensus = None;
     }
@@ -792,8 +848,11 @@ async fn main() -> anyhow::Result<()> {
         .await
         .context("failed fetching remote part of node config from main node")?;
 
+    // Can be used to force the old (pre-framework) approach for the external node.
+    let force_old_approach = std::env::var("EXTERNAL_NODE_OLD_APPROACH").is_ok();
+
     // If the node framework is used, run the node.
-    if opt.use_node_framework {
+    if !force_old_approach {
         // We run the node from a different thread, since the current thread is in tokio context.
         std::thread::spawn(move || {
             let node =
@@ -807,6 +866,8 @@ async fn main() -> anyhow::Result<()> {
         return Ok(());
     }
 
+    tracing::info!("Running the external node using the old approach");
+
     if let Some(threshold) = config.optional.slow_query_threshold() {
         ConnectionPool::<Core>::global_config().set_slow_query_threshold(threshold)?;
     }
@@ -815,7 +876,11 @@
     }
 
     RUST_METRICS.initialize();
-    EN_METRICS.observe_config(&config);
+    EN_METRICS.observe_config(
+        config.required.l1_chain_id,
+        config.required.l2_chain_id,
+        config.postgres.max_connections,
+    );
 
     let singleton_pool_builder = ConnectionPool::singleton(config.postgres.database_url());
     let connection_pool = ConnectionPool::<Core>::builder(
@@ -878,9 +943,6 @@ async fn run_node(
     app_health.insert_custom_component(Arc::new(MainNodeHealthCheck::from(
         main_node_client.clone(),
     )))?;
-    app_health.insert_custom_component(Arc::new(ConnectionPoolHealthCheck::new(
-        connection_pool.clone(),
-    )))?;
 
     // Start the health check server early into the node lifecycle so that its health can be monitored from the very start.
     let healthcheck_handle = HealthCheckHandle::spawn_server(
@@ -938,8 +1000,15 @@ async fn run_node(
         .snapshots_recovery_enabled
         .then_some(SnapshotRecoveryConfig {
             snapshot_l1_batch_override: config.experimental.snapshots_recovery_l1_batch,
+            drop_storage_key_preimages: config
+                .experimental
+                .snapshots_recovery_drop_storage_key_preimages,
+            object_store_config: config.optional.snapshots_recovery_object_store.clone(),
         });
 
+    // Note: while the stop receiver is passed here, it won't be respected, since we wait for this
+    // task to complete. This will be fixed after the migration to the node framework.
ensure_storage_initialized( + stop_receiver.clone(), connection_pool.clone(), main_node_client.clone(), &app_health, diff --git a/core/bin/external_node/src/metrics/framework.rs b/core/bin/external_node/src/metrics/framework.rs new file mode 100644 index 000000000000..82f9263e44db --- /dev/null +++ b/core/bin/external_node/src/metrics/framework.rs @@ -0,0 +1,82 @@ +use std::time::Duration; + +use zksync_dal::{ConnectionPool, Core, CoreDal as _}; +use zksync_node_framework::{ + implementations::resources::pools::{MasterPool, PoolResource}, + FromContext, IntoContext, StopReceiver, Task, TaskId, WiringError, WiringLayer, +}; +use zksync_shared_metrics::rustc::RUST_METRICS; +use zksync_types::{L1ChainId, L2ChainId}; + +use super::EN_METRICS; + +#[derive(Debug)] +pub struct ExternalNodeMetricsLayer { + pub l1_chain_id: L1ChainId, + pub l2_chain_id: L2ChainId, + pub postgres_pool_size: u32, +} + +#[derive(Debug, FromContext)] +pub struct Input { + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +pub struct Output { + #[context(task)] + pub task: ProtocolVersionMetricsTask, +} + +#[async_trait::async_trait] +impl WiringLayer for ExternalNodeMetricsLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "external_node_metrics" + } + + async fn wire(self, input: Self::Input) -> Result { + RUST_METRICS.initialize(); + EN_METRICS.observe_config(self.l1_chain_id, self.l2_chain_id, self.postgres_pool_size); + + let pool = input.master_pool.get_singleton().await?; + let task = ProtocolVersionMetricsTask { pool }; + Ok(Output { task }) + } +} + +#[derive(Debug)] +pub struct ProtocolVersionMetricsTask { + pool: ConnectionPool, +} + +#[async_trait::async_trait] +impl Task for ProtocolVersionMetricsTask { + fn id(&self) -> TaskId { + "en_protocol_version_metrics".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + const QUERY_INTERVAL: Duration = Duration::from_secs(10); + + while !*stop_receiver.0.borrow_and_update() { + let maybe_protocol_version = self + .pool + .connection() + .await? + .protocol_versions_dal() + .last_used_version_id() + .await; + if let Some(version) = maybe_protocol_version { + EN_METRICS.protocol_version.set(version as u64); + } + + tokio::time::timeout(QUERY_INTERVAL, stop_receiver.0.changed()) + .await + .ok(); + } + Ok(()) + } +} diff --git a/core/bin/external_node/src/metrics.rs b/core/bin/external_node/src/metrics/mod.rs similarity index 84% rename from core/bin/external_node/src/metrics.rs rename to core/bin/external_node/src/metrics/mod.rs index ca4495180226..fe1b81adc266 100644 --- a/core/bin/external_node/src/metrics.rs +++ b/core/bin/external_node/src/metrics/mod.rs @@ -3,8 +3,11 @@ use std::time::Duration; use tokio::sync::watch; use vise::{EncodeLabelSet, Gauge, Info, Metrics}; use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_types::{L1ChainId, L2ChainId}; -use crate::{config::ExternalNodeConfig, metadata::SERVER_VERSION}; +use crate::metadata::SERVER_VERSION; + +pub(crate) mod framework; /// Immutable EN parameters that affect multiple components. 
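+///
+/// A short, illustrative sketch of how these parameters are reported: both `main()` and
+/// `ExternalNodeMetricsLayer::wire()` pass the same three values to `observe_config`:
+///
+/// ```ignore
+/// EN_METRICS.observe_config(l1_chain_id, l2_chain_id, postgres_pool_size);
+/// ```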
#[derive(Debug, Clone, Copy, EncodeLabelSet)] @@ -26,12 +29,17 @@ pub(crate) struct ExternalNodeMetrics { } impl ExternalNodeMetrics { - pub(crate) fn observe_config(&self, config: &ExternalNodeConfig) { + pub(crate) fn observe_config( + &self, + l1_chain_id: L1ChainId, + l2_chain_id: L2ChainId, + postgres_pool_size: u32, + ) { let info = ExternalNodeInfo { server_version: SERVER_VERSION, - l1_chain_id: config.required.l1_chain_id.0, - l2_chain_id: config.required.l2_chain_id.as_u64(), - postgres_pool_size: config.postgres.max_connections, + l1_chain_id: l1_chain_id.0, + l2_chain_id: l2_chain_id.as_u64(), + postgres_pool_size, }; tracing::info!("Setting general node information: {info:?}"); diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 5eaff63d20a0..ff851999f623 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -2,6 +2,7 @@ //! as well as an interface to run the node with the specified components. use anyhow::Context as _; +use zksync_block_reverter::NodeRole; use zksync_config::{ configs::{ api::{HealthCheckConfig, MerkleTreeApiConfig}, @@ -15,19 +16,25 @@ use zksync_node_api_server::{tx_sender::ApiContracts, web3::Namespace}; use zksync_node_framework::{ implementations::layers::{ batch_status_updater::BatchStatusUpdaterLayer, + block_reverter::BlockReverterLayer, commitment_generator::CommitmentGeneratorLayer, - consensus::{ConsensusLayer, Mode}, + consensus::ExternalNodeConsensusLayer, consistency_checker::ConsistencyCheckerLayer, healtcheck_server::HealthCheckLayer, l1_batch_commitment_mode_validation::L1BatchCommitmentModeValidationLayer, main_node_client::MainNodeClientLayer, main_node_fee_params_fetcher::MainNodeFeeParamsFetcherLayer, metadata_calculator::MetadataCalculatorLayer, + node_storage_init::{ + external_node_strategy::{ExternalNodeInitStrategyLayer, SnapshotRecoveryConfig}, + NodeStorageInitializerLayer, + }, pools_layer::PoolsLayerBuilder, postgres_metrics::PostgresMetricsLayer, prometheus_exporter::PrometheusExporterLayer, pruning::PruningLayer, query_eth_client::QueryEthClientLayer, + reorg_detector::ReorgDetectorLayer, sigint::SigintHandlerLayer, state_keeper::{ external_io::ExternalIOLayer, main_batch_executor::MainBatchExecutorLayer, @@ -41,7 +48,7 @@ use zksync_node_framework::{ server::{Web3ServerLayer, Web3ServerOptionalConfig}, tree_api_client::TreeApiClientLayer, tx_sender::{PostgresStorageCachesConfig, TxSenderLayer}, - tx_sink::TxSinkLayer, + tx_sink::ProxySinkLayer, }, }, service::{ZkStackService, ZkStackServiceBuilder}, @@ -50,13 +57,14 @@ use zksync_state::RocksdbStorageOptions; use crate::{ config::{self, ExternalNodeConfig}, + metrics::framework::ExternalNodeMetricsLayer, Component, }; /// Builder for the external node. 
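+///
+/// A minimal usage sketch (mirroring the tests in this crate; `config` and `components`
+/// are assumed to be prepared by the caller):
+///
+/// ```ignore
+/// let node = ExternalNodeBuilder::new(config)
+///     .build(components)?; // wires the layers for the requested components
+/// node.run()?;
+/// ```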
 #[derive(Debug)]
 pub(crate) struct ExternalNodeBuilder {
-    node: ZkStackServiceBuilder,
+    pub(crate) node: ZkStackServiceBuilder,
     config: ExternalNodeConfig,
 }
 
@@ -110,6 +118,15 @@ impl ExternalNodeBuilder {
         Ok(self)
     }
 
+    fn add_external_node_metrics_layer(mut self) -> anyhow::Result<Self> {
+        self.node.add_layer(ExternalNodeMetricsLayer {
+            l1_chain_id: self.config.required.l1_chain_id,
+            l2_chain_id: self.config.required.l2_chain_id,
+            postgres_pool_size: self.config.postgres.max_connections,
+        });
+        Ok(self)
+    }
+
     fn add_main_node_client_layer(mut self) -> anyhow::Result<Self> {
         let layer = MainNodeClientLayer::new(
             self.config.required.main_node_url.clone(),
@@ -209,11 +226,7 @@ impl ExternalNodeBuilder {
         let config = self.config.consensus.clone();
         let secrets =
             config::read_consensus_secrets().context("config::read_consensus_secrets()")?;
-        let layer = ConsensusLayer {
-            mode: Mode::External,
-            config,
-            secrets,
-        };
+        let layer = ExternalNodeConsensusLayer { config, secrets };
         self.node.add_layer(layer);
         Ok(self)
     }
@@ -306,6 +319,10 @@ impl ExternalNodeBuilder {
                 .merkle_tree_include_indices_and_filters_in_block_cache,
             memtable_capacity: self.config.optional.merkle_tree_memtable_capacity(),
             stalled_writes_timeout: self.config.optional.merkle_tree_stalled_writes_timeout(),
+            sealed_batches_have_protective_reads: self
+                .config
+                .optional
+                .protective_reads_persistence_enabled,
             recovery: MetadataCalculatorRecoveryConfig {
                 desired_chunk_size: self.config.experimental.snapshots_recovery_tree_chunk_size,
                 parallel_persistence_buffer: self
@@ -355,7 +372,7 @@
             )
             .with_whitelisted_tokens_for_aa_cache(true);
 
-        self.node.add_layer(TxSinkLayer::ProxySink);
+        self.node.add_layer(ProxySinkLayer);
         self.node.add_layer(tx_sender_layer);
         Ok(self)
     }
@@ -421,6 +438,61 @@ impl ExternalNodeBuilder {
         Ok(self)
     }
 
+    fn add_reorg_detector_layer(mut self) -> anyhow::Result<Self> {
+        self.node.add_layer(ReorgDetectorLayer);
+        Ok(self)
+    }
+
+    fn add_block_reverter_layer(mut self) -> anyhow::Result<Self> {
+        let mut layer = BlockReverterLayer::new(NodeRole::External);
+        // Reverting executed batches is more-or-less safe for external nodes.
+        layer
+            .allow_rolling_back_executed_batches()
+            .enable_rolling_back_postgres()
+            .enable_rolling_back_merkle_tree(self.config.required.merkle_tree_path.clone())
+            .enable_rolling_back_state_keeper_cache(self.config.required.state_cache_path.clone());
+        self.node.add_layer(layer);
+        Ok(self)
+    }
+
+    /// This layer makes sure that the database is initialized correctly, e.g.:
+    /// - genesis or snapshot recovery is performed if required;
+    /// - the storage is rolled back if required (e.g. if a reorg is detected).
+    ///
+    /// Depending on the `kind` provided, either a task or a precondition will be added.
+    ///
+    /// *Important*: the task should be added by at most one component, because
+    /// it assumes unique control over the database. Multiple components adding this
+    /// layer in a distributed mode may result in database corruption.
+    ///
+    /// This task works in tandem with a precondition, which must be present in every component:
+    /// the precondition prevents the node from starting until the database is initialized.
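+    ///
+    /// A rough wiring sketch, mirroring how `build` below uses this layer:
+    ///
+    /// ```ignore
+    /// // The core component adds the full initialization task...
+    /// self = self.add_storage_initialization_layer(LayerKind::Task)?;
+    /// // ...and every component gets the precondition, which waits for storage
+    /// // to be initialized without attempting the initialization itself.
+    /// self = self.add_storage_initialization_layer(LayerKind::Precondition)?;
+    /// ```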
+    fn add_storage_initialization_layer(mut self, kind: LayerKind) -> anyhow::Result<Self> {
+        let config = &self.config;
+        let snapshot_recovery_config =
+            config
+                .optional
+                .snapshots_recovery_enabled
+                .then_some(SnapshotRecoveryConfig {
+                    snapshot_l1_batch_override: config.experimental.snapshots_recovery_l1_batch,
+                    drop_storage_key_preimages: config
+                        .experimental
+                        .snapshots_recovery_drop_storage_key_preimages,
+                    object_store_config: config.optional.snapshots_recovery_object_store.clone(),
+                });
+        self.node.add_layer(ExternalNodeInitStrategyLayer {
+            l2_chain_id: self.config.required.l2_chain_id,
+            snapshot_recovery_config,
+        });
+        let mut layer = NodeStorageInitializerLayer::new();
+        if matches!(kind, LayerKind::Precondition) {
+            layer = layer.as_precondition();
+        }
+        self.node.add_layer(layer);
+        Ok(self)
+    }
+
     pub fn build(mut self, mut components: Vec<Component>) -> anyhow::Result<ZkStackService> {
         // Add "base" layers
         self = self
@@ -429,12 +501,29 @@
             .add_prometheus_exporter_layer()?
             .add_pools_layer()?
             .add_main_node_client_layer()?
-            .add_query_eth_client_layer()?;
+            .add_query_eth_client_layer()?
+            .add_reorg_detector_layer()?;
+
+        // Add layers that must run only on a single component.
+        if components.contains(&Component::Core) {
+            // Core is a singleton & mandatory component,
+            // so until we have a dedicated component for "auxiliary" tasks,
+            // it's responsible for things like metrics.
+            self = self
+                .add_postgres_metrics_layer()?
+                .add_external_node_metrics_layer()?;
+            // We assign the storage initialization to the core, as it's considered to be
+            // the "main" component.
+            self = self
+                .add_block_reverter_layer()?
+                .add_storage_initialization_layer(LayerKind::Task)?;
+        }
 
         // Add preconditions for all the components.
         self = self
             .add_l1_batch_commitment_mode_validation_layer()?
-            .add_validate_chain_ids_layer()?;
+            .add_validate_chain_ids_layer()?
+            .add_storage_initialization_layer(LayerKind::Precondition)?;
 
         // Sort the components so that components that may depend on each other are added in the correct order.
         components.sort_unstable_by_key(|component| match component {
@@ -486,11 +575,6 @@ impl ExternalNodeBuilder {
                 self = self.add_tree_data_fetcher_layer()?;
             }
             Component::Core => {
-                // Core is a singleton & mandatory component,
-                // so until we have a dedicated component for "auxiliary" tasks,
-                // it's responsible for things like metrics.
-                self = self.add_postgres_metrics_layer()?;
-
                 // Main tasks
                 self = self
                     .add_state_keeper_layer()?
@@ -506,3 +590,10 @@
         Ok(self.node.build()?)
     }
 }
+
+/// Marker for layers that can add either a task or a precondition.
+#[derive(Debug)]
+enum LayerKind {
+    Task,
+    Precondition,
+}
diff --git a/core/bin/external_node/src/tests.rs b/core/bin/external_node/src/tests.rs
deleted file mode 100644
index 8966a7ac3f3b..000000000000
--- a/core/bin/external_node/src/tests.rs
+++ /dev/null
@@ -1,313 +0,0 @@
-//! High-level tests for EN.
- -use assert_matches::assert_matches; -use test_casing::test_casing; -use zksync_dal::CoreDal; -use zksync_eth_client::clients::MockEthereum; -use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_types::{ - api, ethabi, fee_model::FeeParams, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, - H256, U64, -}; -use zksync_web3_decl::{ - client::{MockClient, L1}, - jsonrpsee::core::ClientError, -}; - -use super::*; - -const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); -const POLL_INTERVAL: Duration = Duration::from_millis(100); - -fn block_details_base(hash: H256) -> api::BlockDetailsBase { - api::BlockDetailsBase { - timestamp: 0, - l1_tx_count: 0, - l2_tx_count: 0, - root_hash: Some(hash), - status: api::BlockStatus::Sealed, - commit_tx_hash: None, - committed_at: None, - prove_tx_hash: None, - proven_at: None, - execute_tx_hash: None, - executed_at: None, - l1_gas_price: 0, - l2_fair_gas_price: 0, - fair_pubdata_price: None, - base_system_contracts_hashes: Default::default(), - } -} - -#[derive(Debug)] -struct TestEnvironment { - sigint_receiver: Option>, - app_health_sender: Option>>, -} - -impl TestEnvironment { - fn new() -> (Self, TestEnvironmentHandles) { - let (sigint_sender, sigint_receiver) = oneshot::channel(); - let (app_health_sender, app_health_receiver) = oneshot::channel(); - let this = Self { - sigint_receiver: Some(sigint_receiver), - app_health_sender: Some(app_health_sender), - }; - let handles = TestEnvironmentHandles { - sigint_sender, - app_health_receiver, - }; - (this, handles) - } -} - -impl NodeEnvironment for TestEnvironment { - fn setup_sigint_handler(&mut self) -> oneshot::Receiver<()> { - self.sigint_receiver - .take() - .expect("requested to setup sigint handler twice") - } - - fn set_app_health(&mut self, health: Arc) { - self.app_health_sender - .take() - .expect("set app health twice") - .send(health) - .ok(); - } -} - -#[derive(Debug)] -struct TestEnvironmentHandles { - sigint_sender: oneshot::Sender<()>, - app_health_receiver: oneshot::Receiver>, -} - -// The returned components have the fully implemented health check life cycle (i.e., signal their shutdown). 
-fn expected_health_components(components: &ComponentsToRun) -> Vec<&'static str> { - let mut output = vec!["reorg_detector"]; - if components.0.contains(&Component::Core) { - output.extend(["consistency_checker", "commitment_generator"]); - } - if components.0.contains(&Component::Tree) { - output.push("tree"); - } - if components.0.contains(&Component::HttpApi) { - output.push("http_api"); - } - if components.0.contains(&Component::WsApi) { - output.push("ws_api"); - } - output -} - -fn mock_eth_client(diamond_proxy_addr: Address) -> MockClient { - let mock = MockEthereum::builder().with_call_handler(move |call, _| { - tracing::info!("L1 call: {call:?}"); - if call.to == Some(diamond_proxy_addr) { - let packed_semver = ProtocolVersionId::latest().into_packed_semver_with_patch(0); - let call_signature = &call.data.as_ref().unwrap().0[..4]; - let contract = zksync_contracts::hyperchain_contract(); - let pricing_mode_sig = contract - .function("getPubdataPricingMode") - .unwrap() - .short_signature(); - let protocol_version_sig = contract - .function("getProtocolVersion") - .unwrap() - .short_signature(); - match call_signature { - sig if sig == pricing_mode_sig => { - return ethabi::Token::Uint(0.into()); // "rollup" mode encoding - } - sig if sig == protocol_version_sig => return ethabi::Token::Uint(packed_semver), - _ => { /* unknown call; panic below */ } - } - } - panic!("Unexpected L1 call: {call:?}"); - }); - mock.build().into_client() -} - -#[test_casing(5, ["all", "core", "api", "tree", "tree,tree_api"])] -#[tokio::test] -#[tracing::instrument] // Add args to the test logs -async fn external_node_basics(components_str: &'static str) { - let _guard = vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging - let temp_dir = tempfile::TempDir::new().unwrap(); - - // Simplest case to mock: the EN already has a genesis L1 batch / L2 block, and it's the only L1 batch / L2 block - // in the network. 
- let connection_pool = ConnectionPool::test_pool().await; - let singleton_pool_builder = ConnectionPool::singleton(connection_pool.database_url().clone()); - let mut storage = connection_pool.connection().await.unwrap(); - let genesis_params = insert_genesis_batch(&mut storage, &GenesisParams::mock()) - .await - .unwrap(); - let genesis_l2_block = storage - .blocks_dal() - .get_l2_block_header(L2BlockNumber(0)) - .await - .unwrap() - .expect("No genesis L2 block"); - drop(storage); - - let components: ComponentsToRun = components_str.parse().unwrap(); - let expected_health_components = expected_health_components(&components); - let opt = Cli { - enable_consensus: false, - components, - use_node_framework: false, - }; - let mut config = ExternalNodeConfig::mock(&temp_dir, &connection_pool); - if opt.components.0.contains(&Component::TreeApi) { - config.tree_component.api_port = Some(0); - } - - let diamond_proxy_addr = config.remote.diamond_proxy_addr; - - let l2_client = MockClient::builder(L2::default()) - .method("eth_chainId", || Ok(U64::from(270))) - .method("zks_L1ChainId", || Ok(U64::from(9))) - .method("zks_L1BatchNumber", || Ok(U64::from(0))) - .method("zks_getL1BatchDetails", move |number: L1BatchNumber| { - assert_eq!(number, L1BatchNumber(0)); - Ok(api::L1BatchDetails { - number: L1BatchNumber(0), - base: block_details_base(genesis_params.root_hash), - }) - }) - .method("eth_blockNumber", || Ok(U64::from(0))) - .method( - "eth_getBlockByNumber", - move |number: api::BlockNumber, _with_txs: bool| { - assert_eq!(number, api::BlockNumber::Number(0.into())); - Ok(api::Block:: { - hash: genesis_l2_block.hash, - ..api::Block::default() - }) - }, - ) - .method("zks_getFeeParams", || Ok(FeeParams::sensible_v1_default())) - .method("en_whitelistedTokensForAA", || Ok([] as [Address; 0])) - .build(); - let l2_client = Box::new(l2_client); - let eth_client = Box::new(mock_eth_client(diamond_proxy_addr)); - - let (env, env_handles) = TestEnvironment::new(); - let node_handle = tokio::spawn(async move { - run_node( - env, - &opt, - &config, - connection_pool, - singleton_pool_builder, - l2_client, - eth_client, - ) - .await - }); - - // Wait until the node is ready. - let app_health = match env_handles.app_health_receiver.await { - Ok(app_health) => app_health, - Err(_) if node_handle.is_finished() => { - node_handle.await.unwrap().unwrap(); - unreachable!("Node tasks should have panicked or errored"); - } - Err(_) => unreachable!("Node tasks should have panicked or errored"), - }; - - loop { - let health_data = app_health.check_health().await; - tracing::info!(?health_data, "received health data"); - if matches!(health_data.inner().status(), HealthStatus::Ready) - && expected_health_components - .iter() - .all(|name| health_data.components().contains_key(name)) - { - break; - } - tokio::time::sleep(POLL_INTERVAL).await; - } - - // Stop the node and check that it timely terminates. - env_handles.sigint_sender.send(()).unwrap(); - - tokio::time::timeout(SHUTDOWN_TIMEOUT, node_handle) - .await - .expect("Node hanged up during shutdown") - .expect("Node panicked") - .expect("Node errored"); - - // Check that the node health was appropriately updated. 
- let health_data = app_health.check_health().await; - tracing::info!(?health_data, "final health data"); - assert_matches!(health_data.inner().status(), HealthStatus::ShutDown); - for name in expected_health_components { - let component_health = &health_data.components()[name]; - assert_matches!(component_health.status(), HealthStatus::ShutDown); - } -} - -#[tokio::test] -async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { - let _guard = vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging - let temp_dir = tempfile::TempDir::new().unwrap(); - - let connection_pool = ConnectionPool::test_pool().await; - let singleton_pool_builder = ConnectionPool::singleton(connection_pool.database_url().clone()); - let mut storage = connection_pool.connection().await.unwrap(); - insert_genesis_batch(&mut storage, &GenesisParams::mock()) - .await - .unwrap(); - drop(storage); - - let opt = Cli { - enable_consensus: false, - components: "core".parse().unwrap(), - use_node_framework: false, - }; - let mut config = ExternalNodeConfig::mock(&temp_dir, &connection_pool); - if opt.components.0.contains(&Component::TreeApi) { - config.tree_component.api_port = Some(0); - } - - let l2_client = MockClient::builder(L2::default()) - .method("eth_chainId", || Ok(U64::from(270))) - .method("zks_L1ChainId", || Ok(U64::from(9))) - .method("zks_L1BatchNumber", || { - Err::<(), _>(ClientError::RequestTimeout) - }) - .method("eth_blockNumber", || { - Err::<(), _>(ClientError::RequestTimeout) - }) - .method("zks_getFeeParams", || Ok(FeeParams::sensible_v1_default())) - .method("en_whitelistedTokensForAA", || Ok([] as [Address; 0])) - .build(); - let l2_client = Box::new(l2_client); - let diamond_proxy_addr = config.remote.diamond_proxy_addr; - let eth_client = Box::new(mock_eth_client(diamond_proxy_addr)); - - let (env, env_handles) = TestEnvironment::new(); - let mut node_handle = tokio::spawn(async move { - run_node( - env, - &opt, - &config, - connection_pool, - singleton_pool_builder, - l2_client, - eth_client, - ) - .await - }); - - // Check that the node doesn't stop on its own. - let timeout_result = tokio::time::timeout(Duration::from_millis(50), &mut node_handle).await; - assert_matches!(timeout_result, Err(tokio::time::error::Elapsed { .. })); - - // Send a stop signal and check that the node reacts to it. 
- env_handles.sigint_sender.send(()).unwrap(); - node_handle.await.unwrap().unwrap(); -} diff --git a/core/bin/external_node/src/tests/framework.rs b/core/bin/external_node/src/tests/framework.rs new file mode 100644 index 000000000000..ea0cc366ca64 --- /dev/null +++ b/core/bin/external_node/src/tests/framework.rs @@ -0,0 +1,161 @@ +use std::sync::Arc; + +use tokio::sync::oneshot; +use zksync_health_check::AppHealthCheck; +use zksync_node_framework::{ + implementations::{ + layers::{ + main_node_client::MainNodeClientLayer, query_eth_client::QueryEthClientLayer, + sigint::SigintHandlerLayer, + }, + resources::{ + eth_interface::EthInterfaceResource, healthcheck::AppHealthCheckResource, + main_node_client::MainNodeClientResource, + }, + }, + service::ServiceContext, + task::TaskKind, + FromContext, IntoContext, StopReceiver, Task, TaskId, WiringError, WiringLayer, +}; +use zksync_types::{L1ChainId, L2ChainId}; +use zksync_web3_decl::client::{MockClient, L1, L2}; + +use super::ExternalNodeBuilder; + +pub(super) fn inject_test_layers( + node: &mut ExternalNodeBuilder, + sigint_receiver: oneshot::Receiver<()>, + app_health_sender: oneshot::Sender>, + l1_client: MockClient, + l2_client: MockClient, +) { + node.node + .add_layer(TestSigintLayer { + receiver: sigint_receiver, + }) + .add_layer(AppHealthHijackLayer { + sender: app_health_sender, + }) + .add_layer(MockL1ClientLayer { client: l1_client }) + .add_layer(MockL2ClientLayer { client: l2_client }); +} + +/// A test layer that would stop the node upon request. +/// Replaces the `SigintHandlerLayer` in tests. +#[derive(Debug)] +struct TestSigintLayer { + receiver: oneshot::Receiver<()>, +} + +#[async_trait::async_trait] +impl WiringLayer for TestSigintLayer { + type Input = (); + type Output = TestSigintTask; + + fn layer_name(&self) -> &'static str { + // We want to override layer by inserting it first. + SigintHandlerLayer.layer_name() + } + + async fn wire(self, _: Self::Input) -> Result { + Ok(TestSigintTask(self.receiver)) + } +} + +struct TestSigintTask(oneshot::Receiver<()>); + +#[async_trait::async_trait] +impl Task for TestSigintTask { + fn kind(&self) -> TaskKind { + TaskKind::UnconstrainedTask + } + + fn id(&self) -> TaskId { + "test_sigint_task".into() + } + + async fn run(self: Box, _: StopReceiver) -> anyhow::Result<()> { + self.0.await?; + Ok(()) + } +} + +impl IntoContext for TestSigintTask { + fn into_context(self, context: &mut ServiceContext<'_>) -> Result<(), WiringError> { + context.add_task(self); + Ok(()) + } +} + +/// Hijacks the `AppHealthCheck` from the context and passes it to the test. +/// Note: It's a separate layer to get access to the app health check, not an override. 
+#[derive(Debug)] +struct AppHealthHijackLayer { + sender: oneshot::Sender>, +} + +#[derive(Debug, FromContext)] +struct AppHealthHijackInput { + #[context(default)] + app_health_check: AppHealthCheckResource, +} + +#[async_trait::async_trait] +impl WiringLayer for AppHealthHijackLayer { + type Input = AppHealthHijackInput; + type Output = (); + + fn layer_name(&self) -> &'static str { + "app_health_hijack" + } + + async fn wire(self, input: Self::Input) -> Result { + self.sender.send(input.app_health_check.0).unwrap(); + Ok(()) + } +} + +#[derive(Debug)] +struct MockL1ClientLayer { + client: MockClient, +} + +#[async_trait::async_trait] +impl WiringLayer for MockL1ClientLayer { + type Input = (); + type Output = EthInterfaceResource; + + fn layer_name(&self) -> &'static str { + // We don't care about values, we just want to hijack the layer name. + QueryEthClientLayer::new(L1ChainId(1), "https://example.com".parse().unwrap()).layer_name() + } + + async fn wire(self, _: Self::Input) -> Result { + Ok(EthInterfaceResource(Box::new(self.client))) + } +} + +#[derive(Debug)] +struct MockL2ClientLayer { + client: MockClient, +} + +#[async_trait::async_trait] +impl WiringLayer for MockL2ClientLayer { + type Input = (); + type Output = MainNodeClientResource; + + fn layer_name(&self) -> &'static str { + // We don't care about values, we just want to hijack the layer name. + MainNodeClientLayer::new( + "https://example.com".parse().unwrap(), + 100.try_into().unwrap(), + L2ChainId::default(), + ) + .layer_name() + } + + async fn wire(self, _: Self::Input) -> Result { + Ok(MainNodeClientResource(Box::new(self.client))) + } +} diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs new file mode 100644 index 000000000000..e2b7edc174c4 --- /dev/null +++ b/core/bin/external_node/src/tests/mod.rs @@ -0,0 +1,198 @@ +//! High-level tests for EN. + +use assert_matches::assert_matches; +use framework::inject_test_layers; +use test_casing::test_casing; +use zksync_types::{fee_model::FeeParams, L1BatchNumber, U64}; +use zksync_web3_decl::jsonrpsee::core::ClientError; + +use super::*; + +mod framework; +mod utils; + +const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); +const POLL_INTERVAL: Duration = Duration::from_millis(100); + +#[test_casing(3, ["all", "core", "api"])] +#[tokio::test] +#[tracing::instrument] // Add args to the test logs +async fn external_node_basics(components_str: &'static str) { + let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + + let (env, env_handles) = utils::TestEnvironment::with_genesis_block(components_str).await; + + let expected_health_components = utils::expected_health_components(&env.components); + let l2_client = utils::mock_l2_client(&env); + let eth_client = utils::mock_eth_client(env.config.remote.diamond_proxy_addr); + + let node_handle = tokio::task::spawn_blocking(move || { + std::thread::spawn(move || { + let mut node = ExternalNodeBuilder::new(env.config); + inject_test_layers( + &mut node, + env.sigint_receiver, + env.app_health_sender, + eth_client, + l2_client, + ); + + let node = node.build(env.components.0.into_iter().collect())?; + node.run()?; + anyhow::Ok(()) + }) + .join() + .unwrap() + }); + + // Wait until the node is ready. 
+    let app_health = match env_handles.app_health_receiver.await {
+        Ok(app_health) => app_health,
+        Err(_) if node_handle.is_finished() => {
+            node_handle.await.unwrap().unwrap();
+            unreachable!("Node tasks should have panicked or errored");
+        }
+        Err(_) => unreachable!("Node tasks should have panicked or errored"),
+    };
+
+    loop {
+        let health_data = app_health.check_health().await;
+        tracing::info!(?health_data, "received health data");
+        if matches!(health_data.inner().status(), HealthStatus::Ready)
+            && expected_health_components
+                .iter()
+                .all(|name| health_data.components().contains_key(name))
+        {
+            break;
+        }
+        tokio::time::sleep(POLL_INTERVAL).await;
+    }
+
+    // Stop the node and check that it terminates in a timely manner.
+    env_handles.sigint_sender.send(()).unwrap();
+
+    tokio::time::timeout(SHUTDOWN_TIMEOUT, node_handle)
+        .await
+        .expect("Node hung up during shutdown")
+        .expect("Node panicked")
+        .expect("Node errored");
+
+    // Check that the node health was appropriately updated.
+    let health_data = app_health.check_health().await;
+    tracing::info!(?health_data, "final health data");
+    assert_matches!(health_data.inner().status(), HealthStatus::ShutDown);
+    for name in expected_health_components {
+        let component_health = &health_data.components()[name];
+        assert_matches!(component_health.status(), HealthStatus::ShutDown);
+    }
+}
+
+#[tokio::test]
+async fn node_reacts_to_stop_signal_during_initial_reorg_detection() {
+    let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging
+    let (env, env_handles) = utils::TestEnvironment::with_genesis_block("core").await;
+
+    let l2_client = utils::mock_l2_client_hanging();
+    let eth_client = utils::mock_eth_client(env.config.remote.diamond_proxy_addr);
+
+    let mut node_handle = tokio::task::spawn_blocking(move || {
+        std::thread::spawn(move || {
+            let mut node = ExternalNodeBuilder::new(env.config);
+            inject_test_layers(
+                &mut node,
+                env.sigint_receiver,
+                env.app_health_sender,
+                eth_client,
+                l2_client,
+            );
+
+            let node = node.build(env.components.0.into_iter().collect())?;
+            node.run()?;
+            anyhow::Ok(())
+        })
+        .join()
+        .unwrap()
+    });
+
+    // Check that the node doesn't stop on its own.
+    let timeout_result = tokio::time::timeout(Duration::from_millis(50), &mut node_handle).await;
+    assert_matches!(timeout_result, Err(tokio::time::error::Elapsed { .. }));
+
+    // Send a stop signal and check that the node reacts to it.
+    env_handles.sigint_sender.send(()).unwrap();
+    node_handle.await.unwrap().unwrap();
+}
+
+#[tokio::test]
+async fn running_tree_without_core_is_not_allowed() {
+    let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging
+    let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("tree").await;
+
+    let l2_client = utils::mock_l2_client(&env);
+    let eth_client = utils::mock_eth_client(env.config.remote.diamond_proxy_addr);
+
+    let node_handle = tokio::task::spawn_blocking(move || {
+        std::thread::spawn(move || {
+            let mut node = ExternalNodeBuilder::new(env.config);
+            inject_test_layers(
+                &mut node,
+                env.sigint_receiver,
+                env.app_health_sender,
+                eth_client,
+                l2_client,
+            );
+
+            // We're only interested in the error, so we drop the result.
+            node.build(env.components.0.into_iter().collect()).map(drop)
+        })
+        .join()
+        .unwrap()
+    });
+
+    // Check that building the node with the tree but without the core component fails.
+    let result = node_handle.await.expect("Building the node panicked");
+    let err = result.expect_err("Building the node with tree but without core should fail");
+    assert!(
+        err.to_string()
+            .contains("Tree must run on the same machine as Core"),
+        "Unexpected error: {}",
+        err
+    );
+}
+
+#[tokio::test]
+async fn running_tree_api_without_tree_is_not_allowed() {
+    let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging
+    let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("core,tree_api").await;
+
+    let l2_client = utils::mock_l2_client(&env);
+    let eth_client = utils::mock_eth_client(env.config.remote.diamond_proxy_addr);
+
+    let node_handle = tokio::task::spawn_blocking(move || {
+        std::thread::spawn(move || {
+            let mut node = ExternalNodeBuilder::new(env.config);
+            inject_test_layers(
+                &mut node,
+                env.sigint_receiver,
+                env.app_health_sender,
+                eth_client,
+                l2_client,
+            );
+
+            // We're only interested in the error, so we drop the result.
+            node.build(env.components.0.into_iter().collect()).map(drop)
+        })
+        .join()
+        .unwrap()
+    });
+
+    // Check that we cannot build the node with the tree API but without the tree component.
+    let result = node_handle.await.expect("Building the node panicked");
+    let err = result.expect_err("Building the node with tree API but without tree should fail");
+    assert!(
+        err.to_string()
+            .contains("Merkle tree API cannot be started without a tree component"),
+        "Unexpected error: {}",
+        err
+    );
+}
diff --git a/core/bin/external_node/src/tests/utils.rs b/core/bin/external_node/src/tests/utils.rs
new file mode 100644
index 000000000000..3784fea4763b
--- /dev/null
+++ b/core/bin/external_node/src/tests/utils.rs
@@ -0,0 +1,195 @@
+use tempfile::TempDir;
+use zksync_dal::CoreDal;
+use zksync_db_connection::connection_pool::TestTemplate;
+use zksync_eth_client::clients::MockEthereum;
+use zksync_node_genesis::{insert_genesis_batch, GenesisBatchParams, GenesisParams};
+use zksync_types::{
+    api, block::L2BlockHeader, ethabi, Address, L2BlockNumber, ProtocolVersionId, H256,
+};
+use zksync_web3_decl::client::{MockClient, L1};
+
+use super::*;
+
+pub(super) fn block_details_base(hash: H256) -> api::BlockDetailsBase {
+    api::BlockDetailsBase {
+        timestamp: 0,
+        l1_tx_count: 0,
+        l2_tx_count: 0,
+        root_hash: Some(hash),
+        status: api::BlockStatus::Sealed,
+        commit_tx_hash: None,
+        committed_at: None,
+        prove_tx_hash: None,
+        proven_at: None,
+        execute_tx_hash: None,
+        executed_at: None,
+        l1_gas_price: 0,
+        l2_fair_gas_price: 0,
+        fair_pubdata_price: None,
+        base_system_contracts_hashes: Default::default(),
+    }
+}
+
+#[derive(Debug)]
+pub(super) struct TestEnvironment {
+    pub(super) sigint_receiver: oneshot::Receiver<()>,
+    pub(super) app_health_sender: oneshot::Sender<Arc<AppHealthCheck>>,
+    pub(super) components: ComponentsToRun,
+    pub(super) config: ExternalNodeConfig,
+    pub(super) genesis_params: GenesisBatchParams,
+    pub(super) genesis_l2_block: L2BlockHeader,
+    // We have to prevent the temp dir from being dropped, so we store it here.
+    _temp_dir: TempDir,
+}
+
+impl TestEnvironment {
+    pub async fn with_genesis_block(components_str: &str) -> (Self, TestEnvironmentHandles) {
+        // Generate a new environment with a genesis block.
+        let temp_dir = tempfile::TempDir::new().unwrap();
+
+        // Simplest case to mock: the EN already has a genesis L1 batch / L2 block, and it's the only L1 batch / L2 block
+        // in the network.
+ let test_db: ConnectionPoolBuilder = + TestTemplate::empty().unwrap().create_db(100).await.unwrap(); + let connection_pool = test_db.build().await.unwrap(); + // let singleton_pool_builder = ConnectionPool::singleton(connection_pool.database_url().clone()); + let mut storage = connection_pool.connection().await.unwrap(); + let genesis_params = insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + let genesis_l2_block = storage + .blocks_dal() + .get_l2_block_header(L2BlockNumber(0)) + .await + .unwrap() + .expect("No genesis L2 block"); + drop(storage); + + let components: ComponentsToRun = components_str.parse().unwrap(); + let mut config = ExternalNodeConfig::mock(&temp_dir, &connection_pool); + if components.0.contains(&Component::TreeApi) { + config.tree_component.api_port = Some(0); + } + drop(connection_pool); + + // Generate channels to control the node. + + let (sigint_sender, sigint_receiver) = oneshot::channel(); + let (app_health_sender, app_health_receiver) = oneshot::channel(); + let this = Self { + sigint_receiver, + app_health_sender, + components, + config, + genesis_params, + genesis_l2_block, + _temp_dir: temp_dir, + }; + let handles = TestEnvironmentHandles { + sigint_sender, + app_health_receiver, + }; + + (this, handles) + } +} + +#[derive(Debug)] +pub(super) struct TestEnvironmentHandles { + pub(super) sigint_sender: oneshot::Sender<()>, + pub(super) app_health_receiver: oneshot::Receiver>, +} + +// The returned components have the fully implemented health check life cycle (i.e., signal their shutdown). +pub(super) fn expected_health_components(components: &ComponentsToRun) -> Vec<&'static str> { + let mut output = vec!["reorg_detector"]; + if components.0.contains(&Component::Core) { + output.extend(["consistency_checker", "commitment_generator"]); + } + if components.0.contains(&Component::Tree) { + output.push("tree"); + } + if components.0.contains(&Component::HttpApi) { + output.push("http_api"); + } + if components.0.contains(&Component::WsApi) { + output.push("ws_api"); + } + output +} + +pub(super) fn mock_eth_client(diamond_proxy_addr: Address) -> MockClient { + let mock = MockEthereum::builder().with_call_handler(move |call, _| { + tracing::info!("L1 call: {call:?}"); + if call.to == Some(diamond_proxy_addr) { + let packed_semver = ProtocolVersionId::latest().into_packed_semver_with_patch(0); + let call_signature = &call.data.as_ref().unwrap().0[..4]; + let contract = zksync_contracts::hyperchain_contract(); + let pricing_mode_sig = contract + .function("getPubdataPricingMode") + .unwrap() + .short_signature(); + let protocol_version_sig = contract + .function("getProtocolVersion") + .unwrap() + .short_signature(); + match call_signature { + sig if sig == pricing_mode_sig => { + return ethabi::Token::Uint(0.into()); // "rollup" mode encoding + } + sig if sig == protocol_version_sig => return ethabi::Token::Uint(packed_semver), + _ => { /* unknown call; panic below */ } + } + } + panic!("Unexpected L1 call: {call:?}"); + }); + mock.build().into_client() +} + +/// Creates a mock L2 client with the genesis block information. 
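+///
+/// An illustrative pairing with the L1 mock, as used in the tests above:
+///
+/// ```ignore
+/// let l2_client = utils::mock_l2_client(&env);
+/// let eth_client = utils::mock_eth_client(env.config.remote.diamond_proxy_addr);
+/// ```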
+pub(super) fn mock_l2_client(env: &TestEnvironment) -> MockClient { + let genesis_root_hash = env.genesis_params.root_hash; + let genesis_l2_block_hash = env.genesis_l2_block.hash; + + MockClient::builder(L2::default()) + .method("eth_chainId", || Ok(U64::from(270))) + .method("zks_L1ChainId", || Ok(U64::from(9))) + .method("zks_L1BatchNumber", || Ok(U64::from(0))) + .method("zks_getL1BatchDetails", move |number: L1BatchNumber| { + assert_eq!(number, L1BatchNumber(0)); + Ok(api::L1BatchDetails { + number: L1BatchNumber(0), + base: utils::block_details_base(genesis_root_hash), + }) + }) + .method("eth_blockNumber", || Ok(U64::from(0))) + .method( + "eth_getBlockByNumber", + move |number: api::BlockNumber, _with_txs: bool| { + assert_eq!(number, api::BlockNumber::Number(0.into())); + Ok(api::Block:: { + hash: genesis_l2_block_hash, + ..api::Block::default() + }) + }, + ) + .method("zks_getFeeParams", || Ok(FeeParams::sensible_v1_default())) + .method("en_whitelistedTokensForAA", || Ok([] as [Address; 0])) + .build() +} + +/// Creates a mock L2 client that will mimic request timeouts on block info requests. +pub(super) fn mock_l2_client_hanging() -> MockClient { + MockClient::builder(L2::default()) + .method("eth_chainId", || Ok(U64::from(270))) + .method("zks_L1ChainId", || Ok(U64::from(9))) + .method("zks_L1BatchNumber", || { + Err::<(), _>(ClientError::RequestTimeout) + }) + .method("eth_blockNumber", || { + Err::<(), _>(ClientError::RequestTimeout) + }) + .method("zks_getFeeParams", || Ok(FeeParams::sensible_v1_default())) + .method("en_whitelistedTokensForAA", || Ok([] as [Address; 0])) + .build() +} diff --git a/core/bin/genesis_generator/Cargo.toml b/core/bin/genesis_generator/Cargo.toml index e6ac400c0ff0..1ece9ea09d2e 100644 --- a/core/bin/genesis_generator/Cargo.toml +++ b/core/bin/genesis_generator/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "genesis_generator" +description = "Tool to generate ZKsync genesis data" version.workspace = true edition.workspace = true authors.workspace = true @@ -8,8 +9,7 @@ repository.workspace = true license.workspace = true keywords.workspace = true categories.workspace = true - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +publish = false [dependencies] zksync_config.workspace = true diff --git a/core/bin/merkle_tree_consistency_checker/Cargo.toml b/core/bin/merkle_tree_consistency_checker/Cargo.toml index 75fa4fc10be1..1399faec1d42 100644 --- a/core/bin/merkle_tree_consistency_checker/Cargo.toml +++ b/core/bin/merkle_tree_consistency_checker/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "merkle_tree_consistency_checker" +description = "Tool to verify consistency of ZKsync Merkle Tree" version = "0.1.0" edition.workspace = true authors.workspace = true @@ -16,7 +17,7 @@ zksync_env_config.workspace = true zksync_merkle_tree.workspace = true zksync_types.workspace = true zksync_storage.workspace = true -vlog.workspace = true +zksync_vlog.workspace = true anyhow.workspace = true clap = { workspace = true, features = ["derive"] } diff --git a/core/bin/merkle_tree_consistency_checker/src/main.rs b/core/bin/merkle_tree_consistency_checker/src/main.rs index 82550d272771..f8584653681f 100644 --- a/core/bin/merkle_tree_consistency_checker/src/main.rs +++ b/core/bin/merkle_tree_consistency_checker/src/main.rs @@ -54,11 +54,11 @@ impl Cli { fn main() -> anyhow::Result<()> { let observability_config = ObservabilityConfig::from_env().context("ObservabilityConfig::from_env()")?; - let log_format: vlog::LogFormat 
= observability_config + let log_format: zksync_vlog::LogFormat = observability_config .log_format .parse() .context("Invalid log format")?; - let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(log_format); if let Some(sentry_url) = observability_config.sentry_url { builder = builder .with_sentry_url(&sentry_url) diff --git a/core/bin/snapshots_creator/Cargo.toml b/core/bin/snapshots_creator/Cargo.toml index 4fe88a64db49..33b1fa82a857 100644 --- a/core/bin/snapshots_creator/Cargo.toml +++ b/core/bin/snapshots_creator/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "snapshots_creator" +description = "Tool to create ZKsync state snapshots" version = "0.1.0" edition.workspace = true authors.workspace = true @@ -12,15 +13,16 @@ publish = false [dependencies] vise.workspace = true -prometheus_exporter.workspace = true zksync_config.workspace = true zksync_dal.workspace = true zksync_env_config.workspace = true zksync_types.workspace = true zksync_object_store.workspace = true -vlog.workspace = true +zksync_vlog.workspace = true +zksync_core_leftovers.workspace = true anyhow.workspace = true +structopt.workspace = true tokio = { workspace = true, features = ["full"] } tracing.workspace = true futures.workspace = true diff --git a/core/bin/snapshots_creator/README.md b/core/bin/snapshots_creator/README.md index 5d9b599599c1..26ebbb6d652a 100644 --- a/core/bin/snapshots_creator/README.md +++ b/core/bin/snapshots_creator/README.md @@ -51,9 +51,9 @@ Creating a snapshot is a part of the [snapshot recovery integration test]. You c Each snapshot consists of three types of data (see [`snapshots.rs`] for exact definitions): -- **Header:** Includes basic information, such as the miniblock / L1 batch of the snapshot, miniblock / L1 batch - timestamps, miniblock hash and L1 batch root hash. Returned by the methods in the `snapshots` namespace of the - JSON-RPC API of the main node. +- **Header:** Includes basic information, such as the L2 block / L1 batch of the snapshot, L2 block / L1 batch + timestamps, L2 block hash and L1 batch root hash. Returned by the methods in the `snapshots` namespace of the JSON-RPC + API of the main node. - **Storage log chunks:** Latest values for all VM storage slots ever written to at the time the snapshot is made. Besides key–value pairs, each storage log record also contains the L1 batch number of its initial write and its enumeration index; both are used to restore the contents of the `initial_writes` table. Chunking storage logs is @@ -64,6 +64,16 @@ Each snapshot consists of three types of data (see [`snapshots.rs`] for exact de - **Factory dependencies:** All bytecodes deployed on L2 at the time the snapshot is made. Stored as a single gzipped Protobuf message in an object store. +### Versioning + +There are currently two versions of the snapshot format, which differ in how keys are represented in storage logs. + +- Version 0 includes key preimages (EVM-compatible keys), i.e. address / contract slot tuples. +- Version 1 includes only hashed keys as used in Era ZKP circuits and in the Merkle tree. Besides reducing the snapshot + size (with the change, keys occupy 32 bytes instead of 52), this allows unifying snapshot recovery with recovery from + L1 data.
Storing only hashed keys in snapshot storage logs is safe; key preimages are required only by a couple of + components to sort keys in a batch, and those components only need preimages for L1 batches executed locally on a node. + [`snapshots.rs`]: ../../lib/types/src/snapshots.rs [object store]: ../../lib/object_store [snapshot recovery integration test]: ../../tests/recovery-test/tests/snapshot-recovery.test.ts diff --git a/core/bin/snapshots_creator/src/creator.rs b/core/bin/snapshots_creator/src/creator.rs index 597f6168b93a..18212a7d2055 100644 --- a/core/bin/snapshots_creator/src/creator.rs +++ b/core/bin/snapshots_creator/src/creator.rs @@ -1,16 +1,17 @@ //! [`SnapshotCreator`] and tightly related types. -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use anyhow::Context as _; use tokio::sync::Semaphore; use zksync_config::SnapshotsCreatorConfig; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalResult}; -use zksync_object_store::ObjectStore; +use zksync_object_store::{ObjectStore, StoredObject}; use zksync_types::{ snapshots::{ uniform_hashed_keys_chunk, SnapshotFactoryDependencies, SnapshotFactoryDependency, - SnapshotMetadata, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey, SnapshotVersion, + SnapshotMetadata, SnapshotStorageLog, SnapshotStorageLogsChunk, + SnapshotStorageLogsStorageKey, SnapshotVersion, }, L1BatchNumber, L2BlockNumber, }; @@ -22,6 +23,7 @@ use crate::tests::HandleEvent; /// Encapsulates progress of creating a particular storage snapshot. #[derive(Debug)] struct SnapshotProgress { + version: SnapshotVersion, l1_batch_number: L1BatchNumber, /// `true` if the snapshot is new (i.e., its progress is not recovered from Postgres). is_new_snapshot: bool, @@ -30,8 +32,9 @@ struct SnapshotProgress { } impl SnapshotProgress { - fn new(l1_batch_number: L1BatchNumber, chunk_count: u64) -> Self { + fn new(version: SnapshotVersion, l1_batch_number: L1BatchNumber, chunk_count: u64) -> Self { Self { + version, l1_batch_number, is_new_snapshot: true, chunk_count, @@ -48,6 +51,7 @@ impl SnapshotProgress { .collect(); Self { + version: snapshot.version, l1_batch_number: snapshot.l1_batch_number, is_new_snapshot: false, chunk_count: snapshot.storage_logs_filepaths.len() as u64, @@ -76,11 +80,13 @@ impl SnapshotCreator { async fn process_storage_logs_single_chunk( &self, semaphore: &Semaphore, + progress: &SnapshotProgress, l2_block_number: L2BlockNumber, - l1_batch_number: L1BatchNumber, chunk_id: u64, - chunk_count: u64, ) -> anyhow::Result<()> { + let chunk_count = progress.chunk_count; + let l1_batch_number = progress.l1_batch_number; + let _permit = semaphore.acquire().await?; #[cfg(test)] if self.event_listener.on_chunk_started().should_exit() { @@ -92,35 +98,45 @@ impl SnapshotCreator { let latency = METRICS.storage_logs_processing_duration[&StorageChunkStage::LoadFromPostgres].start(); - let logs = conn - .snapshots_creator_dal() - .get_storage_logs_chunk(l2_block_number, l1_batch_number, hashed_keys_range) - .await - .context("Error fetching storage logs count")?; - drop(conn); - let latency = latency.observe(); - tracing::info!( - "Loaded chunk {chunk_id} ({} logs) from Postgres in {latency:?}", - logs.len() - ); - - let latency = - METRICS.storage_logs_processing_duration[&StorageChunkStage::SaveToGcs].start(); - let storage_logs_chunk = SnapshotStorageLogsChunk { storage_logs: logs }; - let key = SnapshotStorageLogsStorageKey { - l1_batch_number, - chunk_id, + let (output_filepath, latency) = match progress.version {
SnapshotVersion::Version0 => { + #[allow(deprecated)] // support of version 0 snapshots will be removed eventually + let logs = conn + .snapshots_creator_dal() + .get_storage_logs_chunk_with_key_preimages( + l2_block_number, + l1_batch_number, + hashed_keys_range, + ) + .await + .context("error fetching storage logs")?; + drop(conn); + + let latency = latency.observe(); + tracing::info!( + "Loaded chunk {chunk_id} ({} logs) from Postgres in {latency:?}", + logs.len() + ); + self.store_storage_logs_chunk(l1_batch_number, chunk_id, logs) + .await? + } + SnapshotVersion::Version1 => { + let logs = conn + .snapshots_creator_dal() + .get_storage_logs_chunk(l2_block_number, l1_batch_number, hashed_keys_range) + .await + .context("error fetching storage logs")?; + drop(conn); + + let latency = latency.observe(); + tracing::info!( + "Loaded chunk {chunk_id} ({} logs) from Postgres in {latency:?}", + logs.len() + ); + self.store_storage_logs_chunk(l1_batch_number, chunk_id, logs) + .await? + } }; - let filename = self - .blob_store - .put(key, &storage_logs_chunk) - .await - .context("Error storing storage logs chunk in blob store")?; - let output_filepath_prefix = self - .blob_store - .get_storage_prefix::<SnapshotStorageLogsChunk>(); - let output_filepath = format!("{output_filepath_prefix}/{filename}"); - let latency = latency.observe(); let mut master_conn = self .master_pool @@ -141,6 +157,35 @@ impl SnapshotCreator { Ok(()) } + async fn store_storage_logs_chunk<K>( + &self, + l1_batch_number: L1BatchNumber, + chunk_id: u64, + logs: Vec<SnapshotStorageLog<K>>, + ) -> anyhow::Result<(String, Duration)> + where + for<'a> SnapshotStorageLogsChunk<K>: StoredObject<Key<'a> = SnapshotStorageLogsStorageKey>, + { + let latency = + METRICS.storage_logs_processing_duration[&StorageChunkStage::SaveToGcs].start(); + let storage_logs_chunk = SnapshotStorageLogsChunk { storage_logs: logs }; + let key = SnapshotStorageLogsStorageKey { + l1_batch_number, + chunk_id, + }; + let filename = self + .blob_store + .put(key, &storage_logs_chunk) + .await + .context("Error storing storage logs chunk in blob store")?; + let output_filepath_prefix = self + .blob_store + .get_storage_prefix::<SnapshotStorageLogsChunk<K>>(); + let output_filepath = format!("{output_filepath_prefix}/{filename}"); + let latency = latency.observe(); + Ok((output_filepath, latency)) + } + async fn process_factory_deps( &self, l2_block_number: L2BlockNumber, @@ -190,18 +235,12 @@ impl SnapshotCreator { /// Returns `Ok(None)` if the created snapshot would coincide with `latest_snapshot`. async fn initialize_snapshot_progress( config: &SnapshotsCreatorConfig, + l1_batch_number: L1BatchNumber, min_chunk_count: u64, - latest_snapshot: Option<&SnapshotMetadata>, conn: &mut Connection<'_, Core>, ) -> anyhow::Result<Option<SnapshotProgress>> { - // We subtract 1 so that after restore, EN node has at least one L1 batch to fetch. - let sealed_l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await?; - let sealed_l1_batch_number = sealed_l1_batch_number.context("No L1 batches in Postgres")?; - anyhow::ensure!( - sealed_l1_batch_number != L1BatchNumber(0), - "Cannot create snapshot when only the genesis L1 batch is present in Postgres" - ); - let l1_batch_number = sealed_l1_batch_number - 1; + let snapshot_version = SnapshotVersion::try_from(config.version) + .context("invalid snapshot version specified in config")?; // Sanity check: the selected L1 batch should have Merkle tree data; otherwise, it could be impossible // to recover from the generated snapshot.
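The `Version0`/`Version1` dispatch above and the README's versioning note rest on the same idea: a snapshot storage log is generic over its key representation (52-byte key preimages for version 0, 32-byte hashed keys for version 1), so a single, key-type-generic storage helper serves both versions. Below is a minimal, self-contained sketch of that pattern; `StorageLog`, `StorageLogsChunk`, `KeyPreimage`, and `HashedKey` are hypothetical stand-ins for the `zksync_types` definitions, not the actual types.

```rust
// Hypothetical stand-ins for `SnapshotStorageLog<K>` / `SnapshotStorageLogsChunk<K>`;
// not the real `zksync_types` definitions.

/// Version 1 key: 32-byte hashed key, as used in the Merkle tree and ZKP circuits.
type HashedKey = [u8; 32];

/// Version 0 key preimage: an (address, slot) tuple, 20 + 32 = 52 bytes.
#[derive(Debug)]
struct KeyPreimage {
    address: [u8; 20],
    slot: [u8; 32],
}

/// Storage log generic over the key representation.
#[derive(Debug)]
struct StorageLog<K> {
    key: K,
    value: [u8; 32],
    l1_batch_number_of_initial_write: u32,
    enumeration_index: u64,
}

/// Chunk wrapper mirroring the generic chunk type used by the creator.
#[derive(Debug)]
struct StorageLogsChunk<K> {
    storage_logs: Vec<StorageLog<K>>,
}

/// Like the generic `store_storage_logs_chunk` above, the writer does not care
/// which key representation it receives: both versions share one storage path.
fn store_chunk<K>(chunk: &StorageLogsChunk<K>) -> String {
    format!("stored chunk with {} logs", chunk.storage_logs.len())
}

fn main() {
    // Version 0 chunk: carries full key preimages.
    let v0_chunk = StorageLogsChunk {
        storage_logs: vec![StorageLog {
            key: KeyPreimage { address: [0; 20], slot: [1; 32] },
            value: [2; 32],
            l1_batch_number_of_initial_write: 1,
            enumeration_index: 1,
        }],
    };
    // Version 1 chunk: carries only 32-byte hashed keys.
    let v1_key: HashedKey = [3; 32];
    let v1_chunk = StorageLogsChunk {
        storage_logs: vec![StorageLog {
            key: v1_key,
            value: [2; 32],
            l1_batch_number_of_initial_write: 1,
            enumeration_index: 1,
        }],
    };
    println!("{}", store_chunk(&v0_chunk));
    println!("{}", store_chunk(&v1_chunk));
}
```

Keeping the writer generic means a future snapshot version with yet another key encoding would only need a new type parameter, not a new storage path.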
@@ -215,15 +254,6 @@ impl SnapshotCreator { ) })?; - let latest_snapshot_l1_batch_number = - latest_snapshot.map(|snapshot| snapshot.l1_batch_number); - if latest_snapshot_l1_batch_number == Some(l1_batch_number) { - tracing::info!( - "Snapshot at expected L1 batch #{l1_batch_number} is already created; exiting" - ); - return Ok(None); - } - let distinct_storage_logs_keys_count = conn .snapshots_creator_dal() .get_distinct_storage_logs_keys_count(l1_batch_number) .await @@ -238,7 +268,11 @@ impl SnapshotCreator { "Selected storage logs chunking for L1 batch {l1_batch_number}: \ {chunk_count} chunks of expected size {chunk_size}" ); - Ok(Some(SnapshotProgress::new(l1_batch_number, chunk_count))) + Ok(Some(SnapshotProgress::new( + snapshot_version, + l1_batch_number, + chunk_count, + ))) } /// Returns `Ok(None)` if a snapshot should not be created / resumed. @@ -251,25 +285,59 @@ impl SnapshotCreator { .master_pool .connection_tagged("snapshots_creator") .await?; - let latest_snapshot = master_conn + + let sealed_l1_batch_number = master_conn + .blocks_dal() + .get_sealed_l1_batch_number() + .await?; + let sealed_l1_batch_number = sealed_l1_batch_number.context("No L1 batches in Postgres")?; + let requested_l1_batch_number = if let Some(l1_batch_number) = config.l1_batch_number { + anyhow::ensure!( + l1_batch_number <= sealed_l1_batch_number, + "Requested a snapshot for L1 batch #{l1_batch_number} that doesn't exist in Postgres (latest L1 batch: {sealed_l1_batch_number})" + ); + l1_batch_number + } else { + // We subtract 1 so that after restore, EN node has at least one L1 batch to fetch. + anyhow::ensure!( + sealed_l1_batch_number != L1BatchNumber(0), + "Cannot create snapshot when only the genesis L1 batch is present in Postgres" + ); + sealed_l1_batch_number - 1 + }; + + let existing_snapshot = master_conn .snapshots_dal() - .get_newest_snapshot_metadata() + .get_snapshot_metadata(requested_l1_batch_number) .await?; drop(master_conn); - let pending_snapshot = latest_snapshot - .as_ref() - .filter(|snapshot| !snapshot.is_complete()); - if let Some(snapshot) = pending_snapshot { - Ok(Some(SnapshotProgress::from_existing_snapshot(snapshot))) - } else { - Self::initialize_snapshot_progress( - config, - min_chunk_count, - latest_snapshot.as_ref(), - &mut self.connect_to_replica().await?, - ) - .await + match existing_snapshot { + Some(snapshot) if snapshot.is_complete() => { + tracing::info!("Snapshot for the requested L1 batch is complete: {snapshot:?}"); + Ok(None) + } + Some(snapshot) if config.l1_batch_number.is_some() => { + Ok(Some(SnapshotProgress::from_existing_snapshot(&snapshot))) + } + Some(snapshot) => { + // Unless creating a snapshot for a specific L1 batch is requested, we never continue an existing snapshot, even if it's incomplete. + // This is to make running multiple snapshot creator instances in parallel easier to reason about. + tracing::warn!( + "Snapshot at expected L1 batch #{requested_l1_batch_number} exists, but is incomplete: {snapshot:?}.
If you need to resume creating it, \ + specify the L1 batch number in the snapshot creator config" + ); + Ok(None) + } + None => { + Self::initialize_snapshot_progress( + config, + requested_l1_batch_number, + min_chunk_count, + &mut self.connect_to_replica().await?, + ) + .await + } } } @@ -319,7 +387,7 @@ impl SnapshotCreator { master_conn .snapshots_dal() .add_snapshot( - SnapshotVersion::Version0, + progress.version, progress.l1_batch_number, progress.chunk_count, &factory_deps_output_file, @@ -331,15 +399,18 @@ impl SnapshotCreator { .storage_logs_chunks_left_to_process .set(progress.remaining_chunk_ids.len()); let semaphore = Semaphore::new(config.concurrent_queries_count as usize); - let tasks = progress.remaining_chunk_ids.into_iter().map(|chunk_id| { - self.process_storage_logs_single_chunk( - &semaphore, - last_l2_block_number_in_batch, - progress.l1_batch_number, - chunk_id, - progress.chunk_count, - ) - }); + let tasks = progress + .remaining_chunk_ids + .iter() + .copied() + .map(|chunk_id| { + self.process_storage_logs_single_chunk( + &semaphore, + &progress, + last_l2_block_number_in_batch, + chunk_id, + ) + }); futures::future::try_join_all(tasks).await?; METRICS diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs index 91751f6d2ddf..e07a879746ad 100644 --- a/core/bin/snapshots_creator/src/main.rs +++ b/core/bin/snapshots_creator/src/main.rs @@ -10,15 +10,13 @@ //! at a time). use anyhow::Context as _; -use prometheus_exporter::PrometheusExporterConfig; +use structopt::StructOpt; use tokio::{sync::watch, task::JoinHandle}; -use zksync_config::{ - configs::{DatabaseSecrets, ObservabilityConfig, PrometheusConfig}, - SnapshotsCreatorConfig, -}; +use zksync_config::configs::PrometheusConfig; +use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config}; use zksync_dal::{ConnectionPool, Core}; -use zksync_env_config::{object_store::SnapshotsObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; +use zksync_vlog::prometheus::PrometheusExporterConfig; use crate::creator::SnapshotCreator; @@ -28,40 +26,58 @@ mod metrics; mod tests; async fn maybe_enable_prometheus_metrics( + prometheus_config: Option<PrometheusConfig>, stop_receiver: watch::Receiver<bool>, ) -> anyhow::Result<Option<JoinHandle<anyhow::Result<()>>>> { - let prometheus_config = PrometheusConfig::from_env().ok(); - if let Some(prometheus_config) = prometheus_config { - let exporter_config = PrometheusExporterConfig::push( - prometheus_config.gateway_endpoint(), - prometheus_config.push_interval(), - ); - - tracing::info!("Starting prometheus exporter with config {prometheus_config:?}"); - let prometheus_exporter_task = tokio::spawn(exporter_config.run(stop_receiver)); - Ok(Some(prometheus_exporter_task)) - } else { - tracing::info!("Starting without prometheus exporter"); - Ok(None) + match prometheus_config.map(|c| (c.gateway_endpoint(), c.push_interval())) { + Some((Some(gateway_endpoint), push_interval)) => { + tracing::info!("Starting prometheus exporter with gateway {gateway_endpoint:?} and push_interval {push_interval:?}"); + let exporter_config = PrometheusExporterConfig::push(gateway_endpoint, push_interval); + + let prometheus_exporter_task = tokio::spawn(exporter_config.run(stop_receiver)); + Ok(Some(prometheus_exporter_task)) + } + _ => { + tracing::info!("Starting without prometheus exporter"); + Ok(None) + } } } /// Minimum number of storage log chunks to produce.
const MIN_CHUNK_COUNT: u64 = 10; +#[derive(StructOpt)] +#[structopt(name = "ZKsync snapshot creator", author = "Matter Labs")] +struct Opt { + /// Path to the configuration file. + #[structopt(long)] + config_path: Option<std::path::PathBuf>, + + /// Path to the secrets file. + #[structopt(long)] + secrets_path: Option<std::path::PathBuf>, +} + #[tokio::main] async fn main() -> anyhow::Result<()> { let (stop_sender, stop_receiver) = watch::channel(false); - let observability_config = - ObservabilityConfig::from_env().context("ObservabilityConfig::from_env()")?; - let log_format: vlog::LogFormat = observability_config + let opt = Opt::from_args(); + let general_config = load_general_config(opt.config_path).context("general config")?; + let database_secrets = load_database_secrets(opt.secrets_path).context("database secrets")?; + + let observability_config = general_config + .observability + .context("observability config")?; + let log_format: zksync_vlog::LogFormat = observability_config .log_format .parse() .context("Invalid log format")?; - let prometheus_exporter_task = maybe_enable_prometheus_metrics(stop_receiver).await?; - let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + let prometheus_exporter_task = + maybe_enable_prometheus_metrics(general_config.prometheus_config, stop_receiver).await?; + let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(log_format); if let Some(sentry_url) = observability_config.sentry_url { builder = builder .with_sentry_url(&sentry_url) @@ -71,16 +87,19 @@ async fn main() -> anyhow::Result<()> { let _guard = builder.build(); tracing::info!("Starting snapshots creator"); - let object_store_config = - SnapshotsObjectStoreConfig::from_env().context("SnapshotsObjectStoreConfig::from_env()")?; - let blob_store = ObjectStoreFactory::new(object_store_config.0) + let creator_config = general_config + .snapshot_creator + .context("snapshot creator config")?; + + let object_store_config = creator_config + .clone() + .object_store + .context("snapshot creator object storage config")?; + + let blob_store = ObjectStoreFactory::new(object_store_config) .create_store() .await?; - let database_secrets = DatabaseSecrets::from_env().context("DatabaseSecrets")?; - let creator_config = - SnapshotsCreatorConfig::from_env().context("SnapshotsCreatorConfig::from_env")?; - let replica_pool = ConnectionPool::<Core>::builder( database_secrets.replica_url()?, creator_config.concurrent_queries_count, diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs index 4fd553d0348d..89a3807422be 100644 --- a/core/bin/snapshots_creator/src/tests.rs +++ b/core/bin/snapshots_creator/src/tests.rs @@ -10,6 +10,7 @@ use std::{ }; use rand::{thread_rng, Rng}; +use zksync_config::SnapshotsCreatorConfig; use zksync_dal::{Connection, CoreDal}; use zksync_object_store::{MockObjectStore, ObjectStore}; use zksync_types::{ @@ -25,14 +26,15 @@ use zksync_types::{ use super::*; const TEST_CONFIG: SnapshotsCreatorConfig = SnapshotsCreatorConfig { + version: 1, + l1_batch_number: None, storage_logs_chunk_size: 1_000_000, concurrent_queries_count: 10, object_store: None, }; const SEQUENTIAL_TEST_CONFIG: SnapshotsCreatorConfig = SnapshotsCreatorConfig { - storage_logs_chunk_size: 1_000_000, concurrent_queries_count: 1, - object_store: None, + ..TEST_CONFIG }; #[derive(Debug)] @@ -181,6 +183,7 @@ async fn create_l1_batch( let mut written_keys: Vec<_> = logs_for_initial_writes.iter().map(|log| log.key).collect(); written_keys.sort_unstable(); + let written_keys:
Vec<_> = written_keys.iter().map(StorageKey::hashed_key).collect(); conn.storage_logs_dedup_dal() .insert_initial_writes(l1_batch_number, &written_keys) .await @@ -241,7 +244,7 @@ async fn prepare_postgres( let (l1_batch_number_of_initial_write, enumeration_index) = expected_l1_batches_and_indices[&log.key.hashed_key()]; SnapshotStorageLog { - key: log.key, + key: log.key.hashed_key(), value: log.value, l1_batch_number_of_initial_write, enumeration_index, @@ -338,6 +341,29 @@ async fn persisting_snapshot_logs() { assert_storage_logs(&*object_store, snapshot_l1_batch_number, &expected_outputs).await; } +#[tokio::test] +async fn persisting_snapshot_logs_with_specified_l1_batch() { + let pool = ConnectionPool::<Core>::test_pool().await; + let mut rng = thread_rng(); + let object_store = MockObjectStore::arc(); + let mut conn = pool.connection().await.unwrap(); + let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await; + + // L1 batch numbers are intentionally not ordered + for snapshot_l1_batch_number in [7, 1, 4, 6] { + let snapshot_l1_batch_number = L1BatchNumber(snapshot_l1_batch_number); + let mut config = TEST_CONFIG; + config.l1_batch_number = Some(snapshot_l1_batch_number); + + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) + .run(config, MIN_CHUNK_COUNT) + .await + .unwrap(); + + assert_storage_logs(&*object_store, snapshot_l1_batch_number, &expected_outputs).await; + } +} + async fn assert_storage_logs( object_store: &dyn ObjectStore, snapshot_l1_batch_number: L1BatchNumber, @@ -350,7 +376,56 @@ async fn assert_storage_logs( chunk_id, }; let chunk: SnapshotStorageLogsChunk = object_store.get(key).await.unwrap(); - actual_logs.extend(chunk.storage_logs.into_iter()); + actual_logs.extend(chunk.storage_logs); + } + let expected_logs: HashSet<_> = expected_outputs + .storage_logs + .iter() + .filter(|log| log.l1_batch_number_of_initial_write <= snapshot_l1_batch_number) + .cloned() + .collect(); + assert_eq!(actual_logs, expected_logs); +} + +#[tokio::test] +async fn persisting_snapshot_logs_for_v0_snapshot() { + let pool = ConnectionPool::<Core>::test_pool().await; + let mut rng = thread_rng(); + let object_store = MockObjectStore::arc(); + let mut conn = pool.connection().await.unwrap(); + let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await; + + let config = SnapshotsCreatorConfig { + version: 0, + ..TEST_CONFIG + }; + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) + .run(config, MIN_CHUNK_COUNT) + .await + .unwrap(); + let snapshot_l1_batch_number = L1BatchNumber(8); + + // Logs must be compatible with version 1 `SnapshotStorageLog` format + assert_storage_logs(&*object_store, snapshot_l1_batch_number, &expected_outputs).await; + + // ...and must be compatible with version 0 format as well + let mut actual_logs = HashSet::new(); + for chunk_id in 0..MIN_CHUNK_COUNT { + let key = SnapshotStorageLogsStorageKey { + l1_batch_number: snapshot_l1_batch_number, + chunk_id, + }; + let chunk: SnapshotStorageLogsChunk<StorageKey> = object_store.get(key).await.unwrap(); + let logs_with_hashed_key = chunk + .storage_logs + .into_iter() + .map(|log| SnapshotStorageLog { + key: log.key.hashed_key(), + value: log.value, + l1_batch_number_of_initial_write: log.l1_batch_number_of_initial_write, + enumeration_index: log.enumeration_index, + }); + actual_logs.extend(logs_with_hashed_key); } assert_eq!(actual_logs, expected_outputs.storage_logs); } @@ -386,12 +461,36 @@ async fn recovery_workflow() { let actual_deps: HashSet<_> =
factory_deps.into_iter().collect(); assert_eq!(actual_deps, expected_outputs.deps); - // Process 2 storage log chunks, then stop. + // Check that the creator does nothing unless it's requested to create a new snapshot. SnapshotCreator::for_tests(object_store.clone(), pool.clone()) .stop_after_chunk_count(2) .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT) .await .unwrap(); + let snapshot_metadata = conn + .snapshots_dal() + .get_snapshot_metadata(snapshot_l1_batch_number) + .await + .unwrap() + .expect("No snapshot metadata"); + assert!( + snapshot_metadata + .storage_logs_filepaths + .iter() + .all(Option::is_none), + "{snapshot_metadata:?}" + ); + + // Process 2 storage log chunks, then stop. + let recovery_config = SnapshotsCreatorConfig { + l1_batch_number: Some(snapshot_l1_batch_number), + ..SEQUENTIAL_TEST_CONFIG + }; + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) + .stop_after_chunk_count(2) + .run(recovery_config.clone(), MIN_CHUNK_COUNT) + .await + .unwrap(); let snapshot_metadata = conn .snapshots_dal() @@ -410,7 +509,7 @@ async fn recovery_workflow() { // Process the remaining chunks. SnapshotCreator::for_tests(object_store.clone(), pool.clone()) - .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT) + .run(recovery_config, MIN_CHUNK_COUNT) .await .unwrap(); @@ -425,13 +524,17 @@ async fn recovery_workflow_with_varying_chunk_size() { let mut conn = pool.connection().await.unwrap(); let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await; + // Specifying the snapshot L1 batch right away should work fine. + let snapshot_l1_batch_number = L1BatchNumber(8); + let mut config = SEQUENTIAL_TEST_CONFIG; + config.l1_batch_number = Some(snapshot_l1_batch_number); + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) .stop_after_chunk_count(2) - .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT) + .run(config.clone(), MIN_CHUNK_COUNT) .await .unwrap(); - let snapshot_l1_batch_number = L1BatchNumber(8); let snapshot_metadata = conn .snapshots_dal() .get_snapshot_metadata(snapshot_l1_batch_number) .await @@ -447,14 +550,24 @@ async fn recovery_workflow_with_varying_chunk_size() { 2 ); - let config_with_other_size = SnapshotsCreatorConfig { - storage_logs_chunk_size: 1, // << should be ignored - ..SEQUENTIAL_TEST_CONFIG - }; + config.storage_logs_chunk_size = 1; SnapshotCreator::for_tests(object_store.clone(), pool.clone()) - .run(config_with_other_size, MIN_CHUNK_COUNT) + .run(config, MIN_CHUNK_COUNT) .await .unwrap(); assert_storage_logs(&*object_store, snapshot_l1_batch_number, &expected_outputs).await; } + +#[tokio::test] +async fn creator_fails_if_specified_l1_batch_is_missing() { + let pool = ConnectionPool::<Core>::test_pool().await; + let object_store = MockObjectStore::arc(); + + let mut config = SEQUENTIAL_TEST_CONFIG; + config.l1_batch_number = Some(L1BatchNumber(20)); + SnapshotCreator::for_tests(object_store.clone(), pool.clone()) + .run(config, MIN_CHUNK_COUNT) + .await + .unwrap_err(); +} diff --git a/core/bin/system-constants-generator/Cargo.toml b/core/bin/system-constants-generator/Cargo.toml index 4dc4c507c07b..8632b4c554cc 100644 --- a/core/bin/system-constants-generator/Cargo.toml +++ b/core/bin/system-constants-generator/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "system-constants-generator" +description = "Tool for generating JSON files with the system constants for L1/L2 contracts" version = "0.1.0" edition.workspace = true authors.workspace = true homepage.workspace = true license.workspace = true keywords.workspace = true
categories.workspace = true -description = "Tool for generating JSON files with the system constants for L1/L2 contracts" publish = false [dependencies] @@ -15,7 +15,7 @@ zksync_state.workspace = true zksync_types.workspace = true zksync_utils.workspace = true zksync_contracts.workspace = true -multivm.workspace = true +zksync_multivm.workspace = true codegen.workspace = true serde.workspace = true diff --git a/core/bin/system-constants-generator/src/intrinsic_costs.rs b/core/bin/system-constants-generator/src/intrinsic_costs.rs index c94592defeee..f50cd9eb3a22 100644 --- a/core/bin/system-constants-generator/src/intrinsic_costs.rs +++ b/core/bin/system-constants-generator/src/intrinsic_costs.rs @@ -4,7 +4,7 @@ //! as well as contracts/SystemConfig.json //! -use multivm::utils::get_bootloader_encoding_space; +use zksync_multivm::utils::get_bootloader_encoding_space; use zksync_types::{ethabi::Address, IntrinsicSystemGasConstants, ProtocolVersionId, U256}; use crate::utils::{ diff --git a/core/bin/system-constants-generator/src/main.rs b/core/bin/system-constants-generator/src/main.rs index b0276aeb7fa1..7ada47302248 100644 --- a/core/bin/system-constants-generator/src/main.rs +++ b/core/bin/system-constants-generator/src/main.rs @@ -1,7 +1,8 @@ use std::fs; use codegen::{Block, Scope}; -use multivm::{ +use serde::{Deserialize, Serialize}; +use zksync_multivm::{ utils::{get_bootloader_encoding_space, get_bootloader_max_txs_in_batch}, vm_latest::constants::MAX_VM_PUBDATA_PER_BATCH, zk_evm_latest::zkevm_opcode_defs::{ @@ -12,7 +13,6 @@ use multivm::{ system_params::MAX_TX_ERGS_LIMIT, }, }; -use serde::{Deserialize, Serialize}; use zksync_types::{ IntrinsicSystemGasConstants, ProtocolVersionId, GUARANTEED_PUBDATA_IN_TX, L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 1c0ceda18e7c..f2e73028e6e4 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -1,6 +1,11 @@ use std::{cell::RefCell, rc::Rc}; -use multivm::{ +use once_cell::sync::Lazy; +use zksync_contracts::{ + load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, read_zbin_bytecode, + BaseSystemContracts, ContractLanguage, SystemContractCode, +}; +use zksync_multivm::{ interface::{ dyn_tracers::vm_1_5_0::DynTracer, tracer::VmExecutionStopReason, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, VmInterface, @@ -12,11 +17,6 @@ use multivm::{ }, zk_evm_latest::aux_structures::Timestamp, }; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, read_zbin_bytecode, - BaseSystemContracts, ContractLanguage, SystemContractCode, -}; use zksync_state::{InMemoryStorage, StorageView, WriteStorage}; use zksync_types::{ block::L2BlockHasher, ethabi::Token, fee::Fee, fee_model::BatchFeeInput, l1::L1Tx, l2::L2Tx, diff --git a/core/bin/verified_sources_fetcher/Cargo.toml b/core/bin/verified_sources_fetcher/Cargo.toml index 2d83435e9c42..5fa90590ed5f 100644 --- a/core/bin/verified_sources_fetcher/Cargo.toml +++ b/core/bin/verified_sources_fetcher/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "verified_sources_fetcher" +description = "Tool to fetch verified contract sources" version = "0.1.0" edition.workspace = true authors.workspace = true diff --git a/core/bin/zksync_server/Cargo.toml b/core/bin/zksync_server/Cargo.toml index 
a2f9067872e2..5470f24010c1 100644 --- a/core/bin/zksync_server/Cargo.toml +++ b/core/bin/zksync_server/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "zksync_server" +description = "ZKsync validator/sequencer node" version = "0.1.0" edition.workspace = true authors.workspace = true @@ -20,13 +21,14 @@ zksync_utils.workspace = true zksync_types.workspace = true zksync_core_leftovers.workspace = true zksync_node_genesis.workspace = true +zksync_default_da_clients.workspace = true # Consensus dependencies zksync_consensus_crypto.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_executor.workspace = true zksync_concurrency.workspace = true -vlog.workspace = true +zksync_vlog.workspace = true anyhow.workspace = true clap = { workspace = true, features = ["derive"] } @@ -38,7 +40,6 @@ futures.workspace = true zksync_node_framework.workspace = true zksync_metadata_calculator.workspace = true zksync_node_api_server.workspace = true -prometheus_exporter.workspace = true [target.'cfg(not(target_env = "msvc"))'.dependencies] tikv-jemallocator.workspace = true diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 1c54895863c0..a59705b8e587 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -1,4 +1,4 @@ -use std::{str::FromStr, time::Duration}; +use std::str::FromStr; use anyhow::Context as _; use clap::Parser; @@ -11,23 +11,21 @@ use zksync_config::{ }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - ContractsConfig, DatabaseSecrets, FriProofCompressorConfig, FriProverConfig, + BasicWitnessInputProducerConfig, ContractsConfig, DatabaseSecrets, + ExternalPriceApiClientConfig, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, L1Secrets, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig, Secrets, }, - ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, - GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, + ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig, + EthConfig, EthWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, + SnapshotsCreatorConfig, }; use zksync_core_leftovers::{ - genesis_init, initialize_components, is_genesis_needed, setup_sigint_handler, temp_config_store::{decode_yaml_repr, TempConfigStore}, Component, Components, }; use zksync_env_config::FromEnv; -use zksync_eth_client::clients::Client; -use zksync_storage::RocksDB; -use zksync_utils::wait_for_tasks::ManagedTasks; use crate::node_builder::MainNodeBuilder; @@ -43,13 +41,10 @@ struct Cli { /// Generate genesis block for the first contract deployment using temporary DB. #[arg(long)] genesis: bool, - /// Rebuild tree. - #[arg(long)] - rebuild_tree: bool, /// Comma-separated list of components to launch. #[arg( long, - default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator" + default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher" )] components: ComponentsToRun, /// Path to the yaml config. If set, it will be used instead of env vars. @@ -67,7 +62,8 @@ struct Cli { /// Path to the yaml with genesis. If set, it will be used instead of env vars. #[arg(long)] genesis_path: Option<std::path::PathBuf>, - /// Run the node using the node framework. + /// Used to enable the node framework.
+ /// The node framework is now used by default; this argument is kept for backward compatibility. #[arg(long)] use_node_framework: bool, } @@ -88,20 +84,32 @@ impl FromStr for ComponentsToRun { } } -#[tokio::main] -async fn main() -> anyhow::Result<()> { +fn main() -> anyhow::Result<()> { let opt = Cli::parse(); // Load env config and use it if file config is not provided let tmp_config = load_env_config()?; let configs = match opt.config_path { - None => tmp_config.general(), + None => { + let mut configs = tmp_config.general(); + configs.consensus_config = + config::read_consensus_config().context("read_consensus_config()")?; + configs + } Some(path) => { let yaml = std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - decode_yaml_repr::<zksync_protobuf_config::proto::general::GeneralConfig>(&yaml) - .context("failed decoding general YAML config")? + let mut configs = + decode_yaml_repr::<zksync_protobuf_config::proto::general::GeneralConfig>(&yaml) + .context("failed decoding general YAML config")?; + // Fallback to the consensus_config.yaml file. + // TODO: remove once we move the consensus config to general config on stage + if configs.consensus_config.is_none() { + configs.consensus_config = + config::read_consensus_config().context("read_consensus_config()")?; + } + configs } }; @@ -110,12 +118,12 @@ async fn main() -> anyhow::Result<()> { .clone() .context("observability config")?; - let log_format: vlog::LogFormat = observability_config + let log_format: zksync_vlog::LogFormat = observability_config .log_format .parse() .context("Invalid log format")?; - let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(log_format); if let Some(log_directives) = observability_config.log_directives { builder = builder.with_log_directives(log_directives); } @@ -159,8 +167,6 @@ async fn main() -> anyhow::Result<()> { }, }; - let consensus = config::read_consensus_config().context("read_consensus_config()")?; - let contracts_config = match opt.contracts_config_path { None => ContractsConfig::from_env().context("contracts_config")?, Some(path) => { @@ -181,100 +187,15 @@ async fn main() -> anyhow::Result<()> { } }; - let database_secrets = secrets.database.clone().context("DatabaseSecrets")?; - - if opt.genesis || is_genesis_needed(&database_secrets).await { - genesis_init(genesis.clone(), &database_secrets) - .await - .context("genesis_init")?; - - if let Some(ecosystem_contracts) = &contracts_config.ecosystem_contracts { - let l1_secrets = secrets.l1.as_ref().context("l1_screts")?; - let query_client = Client::http(l1_secrets.l1_rpc_url.clone()) - .context("Ethereum client")? - .for_network(genesis.l1_chain_id.into()) - .build(); - zksync_node_genesis::save_set_chain_id_tx( - &query_client, - contracts_config.diamond_proxy_addr, - ecosystem_contracts.state_transition_proxy_addr, - &database_secrets, - ) - .await - .context("Failed to save SetChainId upgrade transaction")?; - } - - if opt.genesis { - return Ok(()); - } - } - - let components = if opt.rebuild_tree { - vec![Component::Tree] - } else { - opt.components.0 - }; - - // If the node framework is used, run the node. - if opt.use_node_framework { - // We run the node from a different thread, since the current thread is in tokio context.
- std::thread::spawn(move || -> anyhow::Result<()> { - let node = MainNodeBuilder::new( - configs, - wallets, - genesis, - contracts_config, - secrets, - consensus, - ) - .build(components)?; - node.run()?; - Ok(()) - }) - .join() - .expect("Failed to run the node")?; + let node = MainNodeBuilder::new(configs, wallets, genesis, contracts_config, secrets); + if opt.genesis { + // If genesis is requested, we don't need to run the node. + node.only_genesis()?.run()?; return Ok(()); } - // Run core actors. - let sigint_receiver = setup_sigint_handler(); - let (core_task_handles, stop_sender, health_check_handle) = initialize_components( - &configs, - &wallets, - &genesis, - &contracts_config, - &components, - &secrets, - consensus, - ) - .await - .context("Unable to start Core actors")?; - - tracing::info!("Running {} core task handlers", core_task_handles.len()); - - let mut tasks = ManagedTasks::new(core_task_handles); - tokio::select! { - _ = tasks.wait_single() => {}, - _ = sigint_receiver => { - tracing::info!("Stop signal received, shutting down"); - }, - } - - stop_sender.send(true).ok(); - tokio::task::spawn_blocking(RocksDB::await_rocksdb_termination) - .await - .context("error waiting for RocksDB instances to drop")?; - let complete_timeout = - if components.contains(&Component::HttpApi) || components.contains(&Component::WsApi) { - // Increase timeout because of complicated graceful shutdown procedure for API servers. - Duration::from_secs(30) - } else { - Duration::from_secs(5) - }; - tasks.complete(complete_timeout).await; - health_check_handle.stop().await; - tracing::info!("Stopped"); + node.build(opt.components.0)?.run()?; Ok(()) } @@ -306,7 +227,14 @@ fn load_env_config() -> anyhow::Result<TempConfigStore> { gas_adjuster_config: GasAdjusterConfig::from_env().ok(), observability: ObservabilityConfig::from_env().ok(), snapshot_creator: SnapshotsCreatorConfig::from_env().ok(), + da_dispatcher_config: DADispatcherConfig::from_env().ok(), protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(), + basic_witness_input_producer_config: BasicWitnessInputProducerConfig::from_env().ok(), core_object_store: ObjectStoreConfig::from_env().ok(), + base_token_adjuster_config: BaseTokenAdjusterConfig::from_env().ok(), + commitment_generator: None, + pruning: None, + snapshot_recovery: None, + external_price_api_client_config: ExternalPriceApiClientConfig::from_env().ok(), }) } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index d2d76c978684..64039ddcc873 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -2,12 +2,15 @@ //! as well as an interface to run the node with the specified components.
use anyhow::Context; -use prometheus_exporter::PrometheusExporterConfig; use zksync_config::{ - configs::{consensus::ConsensusConfig, wallets::Wallets, GeneralConfig, Secrets}, + configs::{eth_sender::PubdataSendingMode, wallets::Wallets, GeneralConfig, Secrets}, ContractsConfig, GenesisConfig, }; use zksync_core_leftovers::Component; +use zksync_default_da_clients::{ + no_da::wiring_layer::NoDAClientWiringLayer, + object_store::{config::DAObjectStoreConfig, wiring_layer::ObjectStorageClientWiringLayer}, +}; use zksync_metadata_calculator::MetadataCalculatorConfig; use zksync_node_api_server::{ tx_sender::{ApiContracts, TxSenderConfig}, @@ -15,10 +18,17 @@ use zksync_node_api_server::{ }; use zksync_node_framework::{ implementations::layers::{ + base_token::{ + base_token_ratio_persister::BaseTokenRatioPersisterLayer, + base_token_ratio_provider::BaseTokenRatioProviderLayer, + coingecko_client::CoingeckoClientLayer, forced_price_client::ForcedPriceClientLayer, + no_op_external_price_api_client::NoOpExternalPriceApiClientLayer, + }, circuit_breaker_checker::CircuitBreakerCheckerLayer, commitment_generator::CommitmentGeneratorLayer, - consensus::{ConsensusLayer, Mode as ConsensusMode}, + consensus::MainNodeConsensusLayer, contract_verification_api::ContractVerificationApiLayer, + da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, eth_watch::EthWatchLayer, healtcheck_server::HealthCheckLayer, @@ -26,6 +36,9 @@ use zksync_node_framework::{ l1_batch_commitment_mode_validation::L1BatchCommitmentModeValidationLayer, l1_gas::SequencerL1GasLayer, metadata_calculator::MetadataCalculatorLayer, + node_storage_init::{ + main_node_strategy::MainNodeInitStrategyLayer, NodeStorageInitializerLayer, + }, object_store::ObjectStoreLayer, pk_signing_eth_client::PKSigningEthClientLayer, pools_layer::PoolsLayerBuilder, @@ -39,17 +52,21 @@ use zksync_node_framework::{ output_handler::OutputHandlerLayer, RocksdbStorageOptions, StateKeeperLayer, }, tee_verifier_input_producer::TeeVerifierInputProducerLayer, - vm_runner::protective_reads::ProtectiveReadsWriterLayer, + vm_runner::{ + bwip::BasicWitnessInputProducerLayer, protective_reads::ProtectiveReadsWriterLayer, + }, web3_api::{ caches::MempoolCacheLayer, server::{Web3ServerLayer, Web3ServerOptionalConfig}, tree_api_client::TreeApiClientLayer, tx_sender::{PostgresStorageCachesConfig, TxSenderLayer}, - tx_sink::TxSinkLayer, + tx_sink::MasterPoolSinkLayer, }, }, service::{ZkStackService, ZkStackServiceBuilder}, }; +use zksync_types::SHARED_BRIDGE_ETHER_TOKEN_ADDRESS; +use zksync_vlog::prometheus::PrometheusExporterConfig; /// Macro that looks into a path to fetch an optional config, /// and clones it into a variable. @@ -66,7 +83,6 @@ pub struct MainNodeBuilder { genesis_config: GenesisConfig, contracts_config: ContractsConfig, secrets: Secrets, - consensus_config: Option<ConsensusConfig>, } impl MainNodeBuilder { @@ -76,7 +92,6 @@ genesis_config: GenesisConfig, contracts_config: ContractsConfig, secrets: Secrets, - consensus_config: Option<ConsensusConfig>, ) -> Self { Self { node: ZkStackServiceBuilder::new(), @@ -85,7 +100,6 @@ impl MainNodeBuilder { genesis_config, contracts_config, secrets, - consensus_config, } } @@ -140,6 +154,13 @@ impl MainNodeBuilder { } fn add_sequencer_l1_gas_layer(mut self) -> anyhow::Result<Self> { + // Ensure the BaseTokenRatioProviderResource is inserted if the base token is not ETH.
+ if self.contracts_config.base_token_addr != Some(SHARED_BRIDGE_ETHER_TOKEN_ADDRESS) { + let base_token_adjuster_config = try_load_config!(self.configs.base_token_adjuster); + self.node + .add_layer(BaseTokenRatioProviderLayer::new(base_token_adjuster_config)); + } + let gas_adjuster_config = try_load_config!(self.configs.eth) .gas_adjuster .context("Gas adjuster")?; @@ -175,9 +196,11 @@ let merkle_tree_env_config = try_load_config!(self.configs.db_config).merkle_tree; let operations_manager_env_config = try_load_config!(self.configs.operations_manager_config); + let state_keeper_env_config = try_load_config!(self.configs.state_keeper_config); let metadata_calculator_config = MetadataCalculatorConfig::for_main_node( &merkle_tree_env_config, &operations_manager_env_config, + &state_keeper_env_config, ); let mut layer = MetadataCalculatorLayer::new(metadata_calculator_config); if with_tree_api { @@ -199,7 +222,8 @@ .l2_shared_bridge_addr .context("L2 shared bridge address")?, sk_config.l2_block_seal_queue_capacity, - ); + ) + .with_protective_reads_persistence_enabled(sk_config.protective_reads_persistence_enabled); let mempool_io_layer = MempoolIOLayer::new( self.genesis_config.l2_chain_id, sk_config.clone(), @@ -259,7 +283,7 @@ }; // On main node we always use master pool sink. - self.node.add_layer(TxSinkLayer::MasterPoolSink); + self.node.add_layer(MasterPoolSinkLayer); self.node.add_layer(TxSenderLayer::new( TxSenderConfig::new( &sk_config, @@ -326,7 +350,14 @@ let circuit_breaker_config = try_load_config!(self.configs.circuit_breaker_config); let with_debug_namespace = state_keeper_config.save_call_traces; - let mut namespaces = Namespace::DEFAULT.to_vec(); + let mut namespaces = if let Some(namespaces) = &rpc_config.api_namespaces { + namespaces + .iter() + .map(|a| a.parse()) + .collect::<Result<_, _>>()?
+ } else { + Namespace::DEFAULT.to_vec() + }; if with_debug_namespace { namespaces.push(Namespace::Debug) } @@ -342,6 +373,7 @@ rpc_config.websocket_requests_per_minute_limit(), ), replication_lag_limit: circuit_breaker_config.replication_lag_limit(), + with_extended_tracing: rpc_config.extended_api_tracing, ..Default::default() }; self.node.add_layer(Web3ServerLayer::ws( @@ -416,10 +448,17 @@ } fn add_consensus_layer(mut self) -> anyhow::Result<Self> { - self.node.add_layer(ConsensusLayer { - mode: ConsensusMode::Main, - config: self.consensus_config.clone(), - secrets: self.secrets.consensus.clone(), + self.node.add_layer(MainNodeConsensusLayer { + config: self + .configs + .consensus_config + .clone() + .context("Consensus config has to be provided")?, + secrets: self + .secrets + .consensus + .clone() + .context("Consensus secrets have to be provided")?, }); Ok(self) @@ -434,6 +473,38 @@ Ok(self) } + fn add_no_da_client_layer(mut self) -> anyhow::Result<Self> { + self.node.add_layer(NoDAClientWiringLayer); + Ok(self) + } + + #[allow(dead_code)] + fn add_object_storage_da_client_layer(mut self) -> anyhow::Result<Self> { + let object_store_config = DAObjectStoreConfig::from_env()?; + self.node + .add_layer(ObjectStorageClientWiringLayer::new(object_store_config.0)); + Ok(self) + } + + fn add_da_dispatcher_layer(mut self) -> anyhow::Result<Self> { + let eth_sender_config = try_load_config!(self.configs.eth); + if let Some(sender_config) = eth_sender_config.sender { + if sender_config.pubdata_sending_mode != PubdataSendingMode::Custom { + tracing::warn!("DA dispatcher is enabled, but the pubdata sending mode is not `Custom`. DA dispatcher will not be started."); + return Ok(self); + } + } + + let state_keeper_config = try_load_config!(self.configs.state_keeper_config); + let da_config = try_load_config!(self.configs.da_dispatcher_config); + self.node.add_layer(DataAvailabilityDispatcherLayer::new( + state_keeper_config, + da_config, + )); + + Ok(self) + } + fn add_vm_runner_protective_reads_layer(mut self) -> anyhow::Result<Self> { let protective_reads_writer_config = try_load_config!(self.configs.protective_reads_writer_config); @@ -445,6 +516,84 @@ Ok(self) } + fn add_external_api_client_layer(mut self) -> anyhow::Result<Self> { + let config = try_load_config!(self.configs.external_price_api_client_config); + match config.source.as_str() { + CoingeckoClientLayer::CLIENT_NAME => { + self.node.add_layer(CoingeckoClientLayer::new(config)); + } + NoOpExternalPriceApiClientLayer::CLIENT_NAME => { + self.node.add_layer(NoOpExternalPriceApiClientLayer); + } + ForcedPriceClientLayer::CLIENT_NAME => { + self.node.add_layer(ForcedPriceClientLayer::new(config)); + } + _ => { + anyhow::bail!( + "Unknown external price API client source: {}", + config.source + ); + } + } + + Ok(self) + } + + fn add_vm_runner_bwip_layer(mut self) -> anyhow::Result<Self> { + let basic_witness_input_producer_config = + try_load_config!(self.configs.basic_witness_input_producer_config); + self.node.add_layer(BasicWitnessInputProducerLayer::new( + basic_witness_input_producer_config, + self.genesis_config.l2_chain_id, + )); + + Ok(self) + } + + fn add_base_token_ratio_persister_layer(mut self) -> anyhow::Result<Self> { + let config = try_load_config!(self.configs.base_token_adjuster); + let contracts_config = self.contracts_config.clone(); + self.node + .add_layer(BaseTokenRatioPersisterLayer::new(config, contracts_config)); + + Ok(self) + } + + /// This layer will make sure
that the database is initialized correctly, + /// e.g. genesis will be performed if it's required. + /// + /// Depending on the `kind` provided, either a task or a precondition will be added. + /// + /// *Important*: the task should be added by at most one component, because + /// it assumes unique control over the database. Multiple components adding this + /// layer in a distributed mode may result in database corruption. + /// + /// This task works in a pair with a precondition, which must be present in every component: + /// the precondition will prevent the node from starting until the database is initialized. + fn add_storage_initialization_layer(mut self, kind: LayerKind) -> anyhow::Result<Self> { + self.node.add_layer(MainNodeInitStrategyLayer { + genesis: self.genesis_config.clone(), + contracts: self.contracts_config.clone(), + }); + let mut layer = NodeStorageInitializerLayer::new(); + if matches!(kind, LayerKind::Precondition) { + layer = layer.as_precondition(); + } + self.node.add_layer(layer); + Ok(self) + } + + /// Builds the node with the genesis initialization task only. + pub fn only_genesis(mut self) -> anyhow::Result<ZkStackService> { + self = self + .add_pools_layer()? + .add_query_eth_client_layer()? + .add_storage_initialization_layer(LayerKind::Task)?; + + Ok(self.node.build()?) + } + + /// Builds the node with the specified components. pub fn build(mut self, mut components: Vec<Component>) -> anyhow::Result<ZkStackService> { // Add "base" layers (resources and helper tasks). self = self @@ -455,8 +604,12 @@ impl MainNodeBuilder { .add_healthcheck_layer()? .add_prometheus_exporter_layer()? .add_query_eth_client_layer()? - .add_sequencer_l1_gas_layer()? - .add_l1_batch_commitment_mode_validation_layer()?; + .add_sequencer_l1_gas_layer()?; + + // Add preconditions for all the components. + self = self + .add_l1_batch_commitment_mode_validation_layer()? + .add_storage_initialization_layer(LayerKind::Precondition)?; // Sort the components, so that components that may depend on each other are added in the correct order. components.sort_unstable_by_key(|component| match component { @@ -470,6 +623,13 @@ // Note that the layers are added only once, so it's fine to add the same layer multiple times. for component in &components { match component { + Component::StateKeeper => { + // State keeper is the core component of the sequencer, + // which is why we consider it to be responsible for the storage initialization. + self = self + .add_storage_initialization_layer(LayerKind::Task)? + .add_state_keeper_layer()?; + } Component::HttpApi => { self = self .add_tx_sender_layer()? @@ -509,9 +669,6 @@ Component::EthTxManager => { self = self.add_eth_tx_manager_layer()?; } - Component::StateKeeper => { - self = self.add_state_keeper_layer()?; - } Component::TeeVerifierInputProducer => { // FIXME: self = self.add_tee_verifier_input_producer_layer()?; } @@ -529,11 +686,29 @@ Component::CommitmentGenerator => { self = self.add_commitment_generator_layer()?; } + Component::DADispatcher => { + self = self.add_no_da_client_layer()?.add_da_dispatcher_layer()?; + } Component::VmRunnerProtectiveReads => { self = self.add_vm_runner_protective_reads_layer()?; } + Component::BaseTokenRatioPersister => { + self = self + .add_external_api_client_layer()? + .add_base_token_ratio_persister_layer()?; + } + Component::VmRunnerBwip => { + self = self.add_vm_runner_bwip_layer()?; + } } } Ok(self.node.build()?)
} } + +/// Marker for layers that can add either a task or a precondition. +#[derive(Debug)] +enum LayerKind { + Task, + Precondition, +} diff --git a/core/bin/zksync_tee_prover/Cargo.toml b/core/bin/zksync_tee_prover/Cargo.toml new file mode 100644 index 000000000000..037833b1890e --- /dev/null +++ b/core/bin/zksync_tee_prover/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "zksync_tee_prover" +description = "ZKsync TEE prover" +version = "0.1.0" +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true +publish = false + +[dependencies] +anyhow.workspace = true +async-trait.workspace = true +reqwest.workspace = true +secp256k1.workspace = true +serde = { workspace = true, features = ["derive"] } +thiserror.workspace = true +tokio = { workspace = true, features = ["full"] } +tracing.workspace = true +url.workspace = true +vise.workspace = true +zksync_basic_types.workspace = true +zksync_config.workspace = true +zksync_env_config.workspace = true +zksync_node_framework.workspace = true +zksync_prover_interface.workspace = true +zksync_tee_verifier.workspace = true +zksync_types.workspace = true +zksync_vlog.workspace = true diff --git a/core/bin/zksync_tee_prover/src/api_client.rs b/core/bin/zksync_tee_prover/src/api_client.rs new file mode 100644 index 000000000000..1530da971157 --- /dev/null +++ b/core/bin/zksync_tee_prover/src/api_client.rs @@ -0,0 +1,113 @@ +use reqwest::Client; +use secp256k1::{ecdsa::Signature, PublicKey}; +use serde::{de::DeserializeOwned, Serialize}; +use url::Url; +use zksync_basic_types::H256; +use zksync_prover_interface::{ + api::{ + RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitTeeProofRequest, + SubmitTeeProofResponse, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, + }, + inputs::TeeVerifierInput, + outputs::L1BatchTeeProofForL1, +}; +use zksync_types::{tee_types::TeeType, L1BatchNumber}; + +use crate::{error::TeeProverError, metrics::METRICS}; + +/// Implementation of the API client for the proof data handler, run by +/// [`zksync_proof_data_handler::run_server`]. +#[derive(Debug)] +pub(crate) struct TeeApiClient { + api_base_url: Url, + http_client: Client, +} + +impl TeeApiClient { + pub fn new(api_base_url: Url) -> Self { + TeeApiClient { + api_base_url, + http_client: Client::new(), + } + } + + async fn post<Req, Resp, S>(&self, endpoint: S, request: Req) -> Result<Resp, reqwest::Error> + where + Req: Serialize + std::fmt::Debug, + Resp: DeserializeOwned, + S: AsRef<str>, + { + let url = self.api_base_url.join(endpoint.as_ref()).unwrap(); + + tracing::trace!("Sending POST request to {}: {:?}", url, request); + + self.http_client + .post(url) + .json(&request) + .send() + .await? + .error_for_status()? + .json::<Resp>() + .await + } + + /// Registers the attestation quote with the TEE prover interface API, effectively proving that + /// the private key associated with the given public key was used to sign the root hash within a + /// trusted execution environment.
+ pub async fn register_attestation( + &self, + attestation_quote_bytes: Vec<u8>, + public_key: &PublicKey, + ) -> Result<(), TeeProverError> { + let request = RegisterTeeAttestationRequest { + attestation: attestation_quote_bytes, + pubkey: public_key.serialize().to_vec(), + }; + self.post::<_, RegisterTeeAttestationResponse, _>("/tee/register_attestation", request) + .await?; + tracing::info!( + "Attestation quote was successfully registered for the public key {}", + public_key + ); + Ok(()) + } + + /// Fetches the next job (batch data) for the TEE prover to process. + pub async fn get_job(&self) -> Result<Option<Box<TeeVerifierInput>>, TeeProverError> { + let request = TeeProofGenerationDataRequest {}; + let response = self + .post::<_, TeeProofGenerationDataResponse, _>("/tee/proof_inputs", request) + .await?; + Ok(response.0) + } + + /// Submits the successfully verified proof to the TEE prover interface API. + pub async fn submit_proof( + &self, + batch_number: L1BatchNumber, + signature: Signature, + pubkey: &PublicKey, + root_hash: H256, + tee_type: TeeType, + ) -> Result<(), TeeProverError> { + let request = SubmitTeeProofRequest(Box::new(L1BatchTeeProofForL1 { + signature: signature.serialize_compact().into(), + pubkey: pubkey.serialize().into(), + proof: root_hash.as_bytes().into(), + tee_type, + })); + let observer = METRICS.proof_submitting_time.start(); + self.post::<_, SubmitTeeProofResponse, _>( + format!("/tee/submit_proofs/{batch_number}").as_str(), + request, + ) + .await?; + observer.observe(); + tracing::info!( + "Proof submitted successfully for batch number {}", + batch_number + ); + Ok(()) + } +} diff --git a/core/bin/zksync_tee_prover/src/config.rs b/core/bin/zksync_tee_prover/src/config.rs new file mode 100644 index 000000000000..2a77c3752180 --- /dev/null +++ b/core/bin/zksync_tee_prover/src/config.rs @@ -0,0 +1,39 @@ +use std::path::PathBuf; + +use secp256k1::SecretKey; +use url::Url; +use zksync_env_config::FromEnv; +use zksync_types::tee_types::TeeType; + +/// Configuration for the TEE prover. +#[derive(Debug)] +pub(crate) struct TeeProverConfig { + /// The private key used to sign the proofs. + pub signing_key: SecretKey, + /// The path to the file containing the TEE quote. + pub attestation_quote_file_path: PathBuf, + /// The TEE type (e.g., SGX). + pub tee_type: TeeType, + /// URL of the TEE proof data handler API. + pub api_url: Url, +} + +impl FromEnv for TeeProverConfig { + /// Constructs the TEE Prover configuration from environment variables.
diff --git a/core/bin/zksync_tee_prover/src/config.rs b/core/bin/zksync_tee_prover/src/config.rs
new file mode 100644
index 000000000000..2a77c3752180
--- /dev/null
+++ b/core/bin/zksync_tee_prover/src/config.rs
@@ -0,0 +1,39 @@
+use std::path::PathBuf;
+
+use secp256k1::SecretKey;
+use url::Url;
+use zksync_env_config::FromEnv;
+use zksync_types::tee_types::TeeType;
+
+/// Configuration for the TEE prover.
+#[derive(Debug)]
+pub(crate) struct TeeProverConfig {
+    /// The private key used to sign the proofs.
+    pub signing_key: SecretKey,
+    /// The path to the file containing the TEE quote.
+    pub attestation_quote_file_path: PathBuf,
+    /// The TEE type of the prover (e.g. SGX).
+    pub tee_type: TeeType,
+    /// TEE proof data handler API.
+    pub api_url: Url,
+}
+
+impl FromEnv for TeeProverConfig {
+    /// Constructs the TEE Prover configuration from environment variables.
+    ///
+    /// Example usage of environment variables for tests:
+    /// ```
+    /// export TEE_SIGNING_KEY="b50b38c8d396c88728fc032ece558ebda96907a0b1a9340289715eef7bf29deb"
+    /// export TEE_QUOTE_FILE="/tmp/test" # run `echo test > /tmp/test` beforehand
+    /// export TEE_TYPE="sgx"
+    /// export TEE_API_URL="http://127.0.0.1:3320"
+    /// ```
+    fn from_env() -> anyhow::Result<Self> {
+        Ok(Self {
+            signing_key: std::env::var("TEE_SIGNING_KEY")?.parse()?,
+            attestation_quote_file_path: std::env::var("TEE_QUOTE_FILE")?.parse()?,
+            tee_type: std::env::var("TEE_TYPE")?.parse()?,
+            api_url: std::env::var("TEE_API_URL")?.parse()?,
+        })
+    }
+}
diff --git a/core/bin/zksync_tee_prover/src/error.rs b/core/bin/zksync_tee_prover/src/error.rs
new file mode 100644
index 000000000000..bd60a772948a
--- /dev/null
+++ b/core/bin/zksync_tee_prover/src/error.rs
@@ -0,0 +1,47 @@
+use std::{error::Error as StdError, io};
+
+use reqwest::StatusCode;
+
+#[derive(Debug, thiserror::Error)]
+pub(crate) enum TeeProverError {
+    #[error(transparent)]
+    Request(#[from] reqwest::Error),
+    #[error(transparent)]
+    Verification(anyhow::Error),
+}
+
+impl TeeProverError {
+    pub fn is_transient(&self) -> bool {
+        match self {
+            Self::Request(err) => is_transient_http_error(err),
+            _ => false,
+        }
+    }
+}
+
+fn is_transient_http_error(err: &reqwest::Error) -> bool {
+    err.is_timeout()
+        || err.is_connect()
+        // Not all request errors are logically transient, but a significant part of them are (e.g.,
+        // `hyper` protocol-level errors), and it's safer to consider an error transient.
+        || err.is_request()
+        || has_transient_io_source(err)
+        || err.status() == Some(StatusCode::BAD_GATEWAY)
+        || err.status() == Some(StatusCode::SERVICE_UNAVAILABLE)
+}
+
+fn has_transient_io_source(err: &(dyn StdError + 'static)) -> bool {
+    // We treat any I/O errors as transient. This isn't always true, but frequently occurring I/O errors
+    // (e.g., "connection reset by peer") *are* transient, and treating an error as transient is a safer option,
+    // even if it can lead to unnecessary retries.
+    get_source::<io::Error>(err).is_some()
+}
+
+fn get_source<'a, T: StdError + 'static>(mut err: &'a (dyn StdError + 'static)) -> Option<&'a T> {
+    loop {
+        if let Some(err) = err.downcast_ref::<T>() {
+            return Some(err);
+        }
+        err = err.source()?;
+    }
+}
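The chain walk in `get_source` matters because `reqwest` usually wraps `hyper` errors, which in turn wrap `std::io::Error`; a top-level `downcast_ref` would miss the I/O root cause. A self-contained illustration with a stand-in wrapper type:

```rust
use std::{error::Error as StdError, fmt, io};

#[derive(Debug)]
struct Wrapper(io::Error); // stand-in for a reqwest/hyper wrapper layer

impl fmt::Display for Wrapper {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "wrapped: {}", self.0)
    }
}

impl StdError for Wrapper {
    fn source(&self) -> Option<&(dyn StdError + 'static)> {
        Some(&self.0)
    }
}

fn main() {
    let err = Wrapper(io::Error::new(io::ErrorKind::ConnectionReset, "reset by peer"));
    let dyn_err: &(dyn StdError + 'static) = &err;
    // Downcasting the top-level error fails; walking the `source()` chain succeeds.
    assert!(dyn_err.downcast_ref::<io::Error>().is_none());
    assert!(get_source::<io::Error>(dyn_err).is_some());
}
```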
diff --git a/core/bin/zksync_tee_prover/src/main.rs b/core/bin/zksync_tee_prover/src/main.rs
new file mode 100644
index 000000000000..b6c311cb55de
--- /dev/null
+++ b/core/bin/zksync_tee_prover/src/main.rs
@@ -0,0 +1,69 @@
+use anyhow::Context as _;
+use config::TeeProverConfig;
+use tee_prover::TeeProverLayer;
+use zksync_config::configs::{ObservabilityConfig, PrometheusConfig};
+use zksync_env_config::FromEnv;
+use zksync_node_framework::{
+    implementations::layers::{
+        prometheus_exporter::PrometheusExporterLayer, sigint::SigintHandlerLayer,
+    },
+    service::ZkStackServiceBuilder,
+};
+use zksync_vlog::prometheus::PrometheusExporterConfig;
+
+mod api_client;
+mod config;
+mod error;
+mod metrics;
+mod tee_prover;
+
+/// This application serves as a TEE verifier, a.k.a. a TEE prover.
+///
+/// - It's an application that retrieves data about batches executed by the sequencer and verifies
+///   them in the TEE.
+/// - It's a stateless application, e.g. it interacts with the sequencer via API and does not have
+///   any kind of persistent state.
+/// - It submits proofs for proven batches back to the sequencer.
+/// - When the application starts, it registers the attestation on the sequencer, and then runs in a
+///   loop, polling the sequencer for new jobs (batches), verifying them, and submitting generated
+///   proofs back.
+fn main() -> anyhow::Result<()> {
+    let observability_config =
+        ObservabilityConfig::from_env().context("ObservabilityConfig::from_env()")?;
+    let log_format: zksync_vlog::LogFormat = observability_config
+        .log_format
+        .parse()
+        .context("Invalid log format")?;
+    let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(log_format);
+    if let Some(sentry_url) = observability_config.sentry_url {
+        builder = builder
+            .with_sentry_url(&sentry_url)
+            .context("Invalid Sentry URL")?
+            .with_sentry_environment(observability_config.sentry_environment);
+    }
+    let _guard = builder.build();
+
+    let tee_prover_config = TeeProverConfig::from_env()?;
+    let attestation_quote_bytes = std::fs::read(tee_prover_config.attestation_quote_file_path)?;
+
+    let prometheus_config = PrometheusConfig::from_env()?;
+
+    let mut builder = ZkStackServiceBuilder::new();
+    let mut builder_mut = builder
+        .add_layer(SigintHandlerLayer)
+        .add_layer(TeeProverLayer::new(
+            tee_prover_config.api_url,
+            tee_prover_config.signing_key,
+            attestation_quote_bytes,
+            tee_prover_config.tee_type,
+        ));
+
+    if let Some(gateway) = prometheus_config.gateway_endpoint() {
+        let exporter_config =
+            PrometheusExporterConfig::push(gateway, prometheus_config.push_interval());
+        builder_mut = builder_mut.add_layer(PrometheusExporterLayer(exporter_config));
+    }
+
+    builder_mut.build()?.run()?;
+    Ok(())
+}
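Note how `main` only wires `PrometheusExporterLayer` when a push gateway is actually configured. A condensed sketch of that gating, assuming the same `PrometheusExporterConfig::push(endpoint, interval)` constructor used above (the helper and the 10-second interval are illustrative):

```rust
use std::time::Duration;

// Hypothetical helper: returns a pusher config only when a gateway endpoint exists,
// so nodes without a push gateway simply skip the exporter layer.
fn maybe_exporter(gateway: Option<String>) -> Option<PrometheusExporterConfig> {
    gateway.map(|endpoint| PrometheusExporterConfig::push(endpoint, Duration::from_secs(10)))
}
```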
diff --git a/core/bin/zksync_tee_prover/src/metrics.rs b/core/bin/zksync_tee_prover/src/metrics.rs
new file mode 100644
index 000000000000..9f535967f79f
--- /dev/null
+++ b/core/bin/zksync_tee_prover/src/metrics.rs
@@ -0,0 +1,21 @@
+//! Metrics for the TEE Prover.
+
+use std::time::Duration;
+
+use vise::{Buckets, Gauge, Histogram, Metrics, Unit};
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "tee_prover")]
+pub(crate) struct TeeProverMetrics {
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    pub job_waiting_time: Histogram<Duration>,
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    pub proof_generation_time: Histogram<Duration>,
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    pub proof_submitting_time: Histogram<Duration>,
+    pub network_errors_counter: Gauge<u64>,
+    pub last_batch_number_processed: Gauge<u64>,
+}
+
+#[vise::register]
+pub(super) static METRICS: vise::Global<TeeProverMetrics> = vise::Global::new();
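The `vise` histograms above are driven through start/observe guards rather than manual timers, exactly as `TeeProver` uses them in the next file. A minimal usage sketch (the batch number is made up):

```rust
fn record_one_iteration() {
    let observer = METRICS.job_waiting_time.start();
    // ... poll the sequencer for a job ...
    observer.observe(); // records the elapsed wall-clock time into the histogram

    METRICS.last_batch_number_processed.set(42); // hypothetical batch number
    METRICS.network_errors_counter.inc_by(1); // on a failed step, as in `TeeProver::run`
}
```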
diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs
new file mode 100644
index 000000000000..b14d07b72db6
--- /dev/null
+++ b/core/bin/zksync_tee_prover/src/tee_prover.rs
@@ -0,0 +1,220 @@
+use std::{fmt, time::Duration};
+
+use secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1, SecretKey};
+use url::Url;
+use zksync_basic_types::H256;
+use zksync_node_framework::{
+    service::StopReceiver,
+    task::{Task, TaskId},
+    wiring_layer::{WiringError, WiringLayer},
+    IntoContext,
+};
+use zksync_prover_interface::inputs::TeeVerifierInput;
+use zksync_tee_verifier::Verify;
+use zksync_types::{tee_types::TeeType, L1BatchNumber};
+
+use crate::{api_client::TeeApiClient, error::TeeProverError, metrics::METRICS};
+
+/// Wiring layer for `TeeProver`
+#[derive(Debug)]
+pub(crate) struct TeeProverLayer {
+    api_url: Url,
+    signing_key: SecretKey,
+    attestation_quote_bytes: Vec<u8>,
+    tee_type: TeeType,
+}
+
+impl TeeProverLayer {
+    pub fn new(
+        api_url: Url,
+        signing_key: SecretKey,
+        attestation_quote_bytes: Vec<u8>,
+        tee_type: TeeType,
+    ) -> Self {
+        Self {
+            api_url,
+            signing_key,
+            attestation_quote_bytes,
+            tee_type,
+        }
+    }
+}
+
+#[derive(Debug, IntoContext)]
+pub(crate) struct LayerOutput {
+    #[context(task)]
+    pub tee_prover: TeeProver,
+}
+
+#[async_trait::async_trait]
+impl WiringLayer for TeeProverLayer {
+    type Input = ();
+    type Output = LayerOutput;
+
+    fn layer_name(&self) -> &'static str {
+        "tee_prover_layer"
+    }
+
+    async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
+        let tee_prover = TeeProver {
+            config: Default::default(),
+            signing_key: self.signing_key,
+            public_key: self.signing_key.public_key(&Secp256k1::new()),
+            attestation_quote_bytes: self.attestation_quote_bytes,
+            tee_type: self.tee_type,
+            api_client: TeeApiClient::new(self.api_url),
+        };
+        Ok(LayerOutput { tee_prover })
+    }
+}
+
+pub(crate) struct TeeProver {
+    config: TeeProverConfig,
+    signing_key: SecretKey,
+    public_key: PublicKey,
+    attestation_quote_bytes: Vec<u8>,
+    tee_type: TeeType,
+    api_client: TeeApiClient,
+}
+
+impl fmt::Debug for TeeProver {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("TeeProver")
+            .field("config", &self.config)
+            .field("public_key", &self.public_key)
+            .field("attestation_quote_bytes", &self.attestation_quote_bytes)
+            .field("tee_type", &self.tee_type)
+            .finish()
+    }
+}
+
+impl TeeProver {
+    fn verify(
+        &self,
+        tvi: TeeVerifierInput,
+    ) -> Result<(Signature, L1BatchNumber, H256), TeeProverError> {
+        match tvi {
+            TeeVerifierInput::V1(tvi) => {
+                let observer = METRICS.proof_generation_time.start();
+                let verification_result = tvi.verify().map_err(TeeProverError::Verification)?;
+                let root_hash_bytes = verification_result.value_hash.as_bytes();
+                let batch_number = verification_result.batch_number;
+                let msg_to_sign = Message::from_slice(root_hash_bytes)
+                    .map_err(|e| TeeProverError::Verification(e.into()))?;
+                let signature = self.signing_key.sign_ecdsa(msg_to_sign);
+                observer.observe();
+                Ok((signature, batch_number, verification_result.value_hash))
+            }
+            _ => Err(TeeProverError::Verification(anyhow::anyhow!(
+                "Only TeeVerifierInput::V1 verification supported."
+            ))),
+        }
+    }
+
+    async fn step(&self) -> Result<Option<L1BatchNumber>, TeeProverError> {
+        match self.api_client.get_job().await? {
+            Some(job) => {
+                let (signature, batch_number, root_hash) = self.verify(*job)?;
+                self.api_client
+                    .submit_proof(
+                        batch_number,
+                        signature,
+                        &self.public_key,
+                        root_hash,
+                        self.tee_type,
+                    )
+                    .await?;
+                Ok(Some(batch_number))
+            }
+            None => {
+                tracing::trace!("There are currently no pending batches to be proven");
+                Ok(None)
+            }
+        }
+    }
+}
+
+/// TEE prover configuration options.
+#[derive(Debug, Clone)]
+pub struct TeeProverConfig {
+    /// Number of retries for transient errors before giving up on recovery (i.e., returning an error
+    /// from [`Self::run()`]).
+    pub max_retries: usize,
+    /// Initial back-off interval when retrying recovery on a transient error. Each subsequent retry interval
+    /// will be multiplied by [`Self.retry_backoff_multiplier`].
+    pub initial_retry_backoff: Duration,
+    pub retry_backoff_multiplier: f32,
+    pub max_backoff: Duration,
+}
+
+impl Default for TeeProverConfig {
+    fn default() -> Self {
+        Self {
+            max_retries: 5,
+            initial_retry_backoff: Duration::from_secs(1),
+            retry_backoff_multiplier: 2.0,
+            max_backoff: Duration::from_secs(128),
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl Task for TeeProver {
+    fn id(&self) -> TaskId {
+        "tee_prover".into()
+    }
+
+    async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        tracing::info!("Starting the task {}", self.id());
+
+        self.api_client
+            .register_attestation(self.attestation_quote_bytes.clone(), &self.public_key)
+            .await?;
+
+        let mut retries = 1;
+        let mut backoff = self.config.initial_retry_backoff;
+        let mut observer = METRICS.job_waiting_time.start();
+
+        loop {
+            if *stop_receiver.0.borrow() {
+                tracing::info!("Stop signal received, shutting down TEE Prover component");
+                return Ok(());
+            }
+            let result = self.step().await;
+            let need_to_sleep = match result {
+                Ok(batch_number) => {
+                    retries = 1;
+                    backoff = self.config.initial_retry_backoff;
+                    if let Some(batch_number) = batch_number {
+                        observer.observe();
+                        observer = METRICS.job_waiting_time.start();
+                        METRICS
+                            .last_batch_number_processed
+                            .set(batch_number.0 as u64);
+                        false
+                    } else {
+                        true
+                    }
+                }
+                Err(err) => {
+                    METRICS.network_errors_counter.inc_by(1);
+                    if !err.is_transient() || retries > self.config.max_retries {
+                        return Err(err.into());
+                    }
+                    retries += 1;
+                    tracing::warn!(%err, "Failed TEE prover step function {retries}/{}, retrying in {} milliseconds.", self.config.max_retries, backoff.as_millis());
+                    backoff = std::cmp::min(
+                        backoff.mul_f32(self.config.retry_backoff_multiplier),
+                        self.config.max_backoff,
+                    );
+                    true
+                }
+            };
+            if need_to_sleep {
+                tokio::time::timeout(backoff, stop_receiver.0.changed())
+                    .await
+                    .ok();
+            }
+        }
+    }
+}
diff --git a/core/lib/basic_types/Cargo.toml b/core/lib/basic_types/Cargo.toml
index 937006bb2578..84411405c2a4 100644
--- a/core/lib/basic_types/Cargo.toml
+++ b/core/lib/basic_types/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_basic_types"
-version = "0.1.0"
+description = "ZKsync primitive types"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace
= true diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index a55705886c55..21e90f4bad77 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -26,6 +26,7 @@ pub mod commitment; pub mod network; pub mod protocol_version; pub mod prover_dal; +pub mod tee_types; pub mod url; pub mod vm_version; pub mod web3; diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs index 5eb00dc63a4f..edaad3798e82 100644 --- a/core/lib/basic_types/src/prover_dal.rs +++ b/core/lib/basic_types/src/prover_dal.rs @@ -5,15 +5,9 @@ use chrono::{DateTime, Duration, NaiveDateTime, NaiveTime, Utc}; use strum::{Display, EnumString}; use crate::{ - basic_fri_types::{AggregationRound, Eip4844Blobs}, - protocol_version::ProtocolVersionId, - L1BatchNumber, + basic_fri_types::AggregationRound, protocol_version::ProtocolVersionId, L1BatchNumber, }; -// This currently lives in `zksync_prover_types` -- we don't want a dependency between prover types (`zkevm_test_harness`) and DAL. -// This will be gone as part of 1.5.0, when EIP4844 becomes normal jobs, rather than special cased ones. -pub const EIP_4844_CIRCUIT_ID: u8 = 255; - #[derive(Debug, Clone)] pub struct FriProverJobMetadata { pub id: u32, @@ -259,7 +253,6 @@ pub struct ProverJobFriInfo { pub created_at: NaiveDateTime, pub updated_at: NaiveDateTime, pub time_taken: Option, - pub is_blob_cleaned: Option, pub depth: u32, pub is_node_final_proof: bool, pub proof_blob_url: Option, @@ -270,7 +263,7 @@ pub struct ProverJobFriInfo { #[derive(Debug, Clone)] pub struct BasicWitnessGeneratorJobInfo { pub l1_batch_number: L1BatchNumber, - pub merkle_tree_paths_blob_url: Option, + pub witness_inputs_blob_url: Option, pub attempts: u32, pub status: WitnessJobStatus, pub error: Option, @@ -278,10 +271,8 @@ pub struct BasicWitnessGeneratorJobInfo { pub updated_at: NaiveDateTime, pub processing_started_at: Option, pub time_taken: Option, - pub is_blob_cleaned: Option, pub protocol_version: Option, pub picked_by: Option, - pub eip_4844_blobs: Option, } #[derive(Debug, Clone)] @@ -297,7 +288,6 @@ pub struct LeafWitnessGeneratorJobInfo { pub updated_at: NaiveDateTime, pub processing_started_at: Option, pub time_taken: Option, - pub is_blob_cleaned: Option, pub number_of_basic_circuits: Option, pub protocol_version: Option, pub picked_by: Option, @@ -382,3 +372,12 @@ pub struct ProofCompressionJobInfo { pub time_taken: Option, pub picked_by: Option, } + +// Used for transferring information about L1 Batches from DAL to public interfaces (currently prover_cli stats). +/// DTO containing information about L1 Batch Proof. 
+#[derive(Debug, Clone)] +pub struct ProofGenerationTime { + pub l1_batch_number: L1BatchNumber, + pub time_taken: NaiveTime, + pub created_at: NaiveDateTime, +} diff --git a/core/lib/basic_types/src/tee_types.rs b/core/lib/basic_types/src/tee_types.rs new file mode 100644 index 000000000000..c9be9b6e99d8 --- /dev/null +++ b/core/lib/basic_types/src/tee_types.rs @@ -0,0 +1,9 @@ +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumString}; + +#[derive(Debug, Clone, Copy, PartialEq, EnumString, Display, Serialize, Deserialize)] +#[non_exhaustive] +pub enum TeeType { + #[strum(serialize = "sgx")] + Sgx, +} diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index af9cd1eea3fc..9bc10c8ab364 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -327,6 +327,9 @@ pub struct Log { pub log_type: Option, /// Removed pub removed: Option, + /// L2 block timestamp + #[serde(rename = "blockTimestamp")] + pub block_timestamp: Option, } impl Log { @@ -827,6 +830,7 @@ pub enum TransactionCondition { } // `FeeHistory`: from `web3::types::fee_history` +// Adapted to support blobs. /// The fee history type returned from `eth_feeHistory` call. #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] @@ -834,14 +838,25 @@ pub enum TransactionCondition { pub struct FeeHistory { /// Lowest number block of the returned range. pub oldest_block: BlockNumber, - /// A vector of block base fees per gas. This includes the next block after the newest of the returned range, because this value can be derived from the newest block. Zeroes are returned for pre-EIP-1559 blocks. + /// A vector of block base fees per gas. This includes the next block after the newest of the returned range, + /// because this value can be derived from the newest block. Zeroes are returned for pre-EIP-1559 blocks. #[serde(default)] // some node implementations skip empty lists pub base_fee_per_gas: Vec, /// A vector of block gas used ratios. These are calculated as the ratio of gas used and gas limit. #[serde(default)] // some node implementations skip empty lists pub gas_used_ratio: Vec, - /// A vector of effective priority fee per gas data points from a single block. All zeroes are returned if the block is empty. Returned only if requested. + /// A vector of effective priority fee per gas data points from a single block. All zeroes are returned if + /// the block is empty. Returned only if requested. pub reward: Option>>, + /// An array of base fees per blob gas for blocks. This includes the next block following the newest in the + /// returned range, as this value can be derived from the latest block. For blocks before EIP-4844, zeroes + /// are returned. + #[serde(default)] // some node implementations skip empty lists + pub base_fee_per_blob_gas: Vec, + /// An array showing the ratios of blob gas used in blocks. These ratios are calculated by dividing blobGasUsed + /// by the maximum blob gas per block. 
+ #[serde(default)] // some node implementations skip empty lists + pub blob_gas_used_ratio: Vec, } // `SyncInfo`, `SyncState`: from `web3::types::sync_state` diff --git a/core/lib/circuit_breaker/Cargo.toml b/core/lib/circuit_breaker/Cargo.toml index 308a9e7eaa35..9bc00b475d4a 100644 --- a/core/lib/circuit_breaker/Cargo.toml +++ b/core/lib/circuit_breaker/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_circuit_breaker" -version = "0.1.0" +description = "ZKsync circuit breakers" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index 144843c2bab2..b1a2a0ef1e8f 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_config" -version = "0.1.0" +description = "ZKsync core configuration" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -13,7 +14,9 @@ categories.workspace = true zksync_basic_types.workspace = true zksync_crypto_primitives.workspace = true zksync_consensus_utils.workspace = true +zksync_concurrency.workspace = true +url.workspace = true anyhow.workspace = true rand.workspace = true secrecy.workspace = true diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index 3b33ef43343f..e039ab10116a 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -213,6 +213,13 @@ pub struct Web3JsonRpcConfig { /// (additionally to natively bridged tokens). #[serde(default)] pub whitelisted_tokens_for_aa: Vec
<Address>,
+    /// Enabled JSON RPC API namespaces. If not set, all namespaces will be available
+    #[serde(default)]
+    pub api_namespaces: Option<Vec<String>>,
+    /// Enables extended tracing of RPC calls. This may negatively impact performance for nodes under high load
+    /// (hundreds or thousands of RPS).
+    #[serde(default)]
+    pub extended_api_tracing: bool,
 }
 
 impl Web3JsonRpcConfig {
@@ -251,6 +258,8 @@ impl Web3JsonRpcConfig {
             mempool_cache_size: Default::default(),
             tree_api_url: None,
             whitelisted_tokens_for_aa: Default::default(),
+            api_namespaces: None,
+            extended_api_tracing: false,
         }
     }
diff --git a/core/lib/config/src/configs/base_token_adjuster.rs b/core/lib/config/src/configs/base_token_adjuster.rs
new file mode 100644
index 000000000000..4ef253989cd2
--- /dev/null
+++ b/core/lib/config/src/configs/base_token_adjuster.rs
@@ -0,0 +1,47 @@
+use std::time::Duration;
+
+use serde::Deserialize;
+
+/// By default, the ratio persister will run every 30 seconds.
+pub const DEFAULT_INTERVAL_MS: u64 = 30_000;
+
+/// By default, refetch the ratio from the DB every 0.5 seconds.
+pub const DEFAULT_CACHE_UPDATE_INTERVAL: u64 = 500;
+
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+pub struct BaseTokenAdjusterConfig {
+    /// How often to start a new cycle of the ratio persister to fetch external prices and persist ratios.
+    #[serde(default = "BaseTokenAdjusterConfig::default_polling_interval")]
+    pub price_polling_interval_ms: u64,
+
+    /// We cache (in memory) the ratio fetched from the DB. This interval defines how often it is refetched.
+    #[serde(default = "BaseTokenAdjusterConfig::default_cache_update_interval")]
+    pub price_cache_update_interval_ms: u64,
+}
+
+impl Default for BaseTokenAdjusterConfig {
+    fn default() -> Self {
+        Self {
+            price_polling_interval_ms: Self::default_polling_interval(),
+            price_cache_update_interval_ms: Self::default_cache_update_interval(),
+        }
+    }
+}
+
+impl BaseTokenAdjusterConfig {
+    fn default_polling_interval() -> u64 {
+        DEFAULT_INTERVAL_MS
+    }
+
+    pub fn price_polling_interval(&self) -> Duration {
+        Duration::from_millis(self.price_polling_interval_ms)
+    }
+
+    fn default_cache_update_interval() -> u64 {
+        DEFAULT_CACHE_UPDATE_INTERVAL
+    }
+
+    pub fn price_cache_update_interval(&self) -> Duration {
+        Duration::from_millis(self.price_cache_update_interval_ms)
+    }
+}
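Both intervals above are serde defaults, so an empty config section deserializes into the documented 30 s polling / 0.5 s cache-refresh values. A quick check of that, assuming `serde_json` is available as a dev dependency:

```rust
use std::time::Duration;

#[test]
fn default_intervals_apply() {
    // Both fields have `#[serde(default = ...)]`, so "{}" is a valid config.
    let config: BaseTokenAdjusterConfig = serde_json::from_str("{}").unwrap();
    assert_eq!(config.price_polling_interval(), Duration::from_millis(30_000));
    assert_eq!(config.price_cache_update_interval(), Duration::from_millis(500));
}
```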
diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs
index c1abd1fea102..53884c4a7227 100644
--- a/core/lib/config/src/configs/chain.rs
+++ b/core/lib/config/src/configs/chain.rs
@@ -105,7 +105,12 @@ pub struct StateKeeperConfig {
     pub batch_overhead_l1_gas: u64,
     /// The maximum amount of gas that can be used by the batch. This value is derived from the circuits limitation per batch.
     pub max_gas_per_batch: u64,
-    /// The maximum amount of pubdata that can be used by the batch. Note that if the calldata is used as pubdata, this variable should not exceed 128kb.
+    /// The maximum amount of pubdata that can be used by the batch.
+    /// This variable should not exceed:
+    /// - 128kb for calldata-based rollups
+    /// - 120kb * n, where `n` is the number of blobs for blob-based rollups
+    /// - the DA layer's blob size limit for the DA layer-based validiums
+    /// - 100 MB for the object store-based or no-da validiums
     pub max_pubdata_per_batch: u64,
 
     /// The version of the fee model to use.
@@ -120,6 +125,12 @@
     /// the recursion layers' circuits.
     pub max_circuits_per_batch: usize,
 
+    /// Configures whether to persist protective reads when persisting L1 batches in the state keeper.
+    /// Protective reads can be written asynchronously in VM runner instead.
+    /// By default, set to `true` as a temporary safety measure.
+    #[serde(default = "StateKeeperConfig::default_protective_reads_persistence_enabled")]
+    pub protective_reads_persistence_enabled: bool,
+
     // Base system contract hashes, required only for generating genesis config.
     // #PLA-811
     #[deprecated(note = "Use GenesisConfig::bootloader_hash instead")]
@@ -132,6 +143,10 @@
 }
 
 impl StateKeeperConfig {
+    fn default_protective_reads_persistence_enabled() -> bool {
+        true
+    }
+
     /// Creates a config object suitable for use in unit tests.
     /// Values mostly repeat the values used in the localhost environment.
     pub fn for_tests() -> Self {
@@ -163,6 +178,7 @@ impl StateKeeperConfig {
             validation_computational_gas_limit: 300000,
             save_call_traces: true,
             max_circuits_per_batch: 24100,
+            protective_reads_persistence_enabled: true,
             bootloader_hash: None,
             default_aa_hash: None,
             l1_batch_commit_data_generator_mode: L1BatchCommitmentMode::Rollup,
diff --git a/core/lib/config/src/configs/commitment_generator.rs b/core/lib/config/src/configs/commitment_generator.rs
new file mode 100644
index 000000000000..9ec4d805b8fe
--- /dev/null
+++ b/core/lib/config/src/configs/commitment_generator.rs
@@ -0,0 +1,10 @@
+use std::num::NonZeroU32;
+
+use serde::Deserialize;
+
+#[derive(Debug, Deserialize, Clone, PartialEq)]
+pub struct CommitmentGeneratorConfig {
+    /// Maximum degree of parallelism during commitment generation, i.e., the maximum number of L1 batches being processed in parallel.
+    /// If not specified, commitment generator will use a value roughly equal to the number of CPU cores with some clamping applied.
+    pub max_parallelism: NonZeroU32,
+}
diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs
index c31d34941d2b..50885a6ec6fe 100644
--- a/core/lib/config/src/configs/consensus.rs
+++ b/core/lib/config/src/configs/consensus.rs
@@ -2,6 +2,7 @@ use std::collections::{BTreeMap, BTreeSet};
 
 use secrecy::{ExposeSecret as _, Secret};
 use zksync_basic_types::L2ChainId;
+use zksync_concurrency::{limiter, time};
 
 /// `zksync_consensus_crypto::TextFmt` representation of `zksync_consensus_roles::validator::PublicKey`.
 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct ValidatorPublicKey(pub String);
@@ -11,6 +12,14 @@
 #[derive(Debug, Clone)]
 pub struct ValidatorSecretKey(pub Secret<String>);
 
+/// `zksync_consensus_crypto::TextFmt` representation of `zksync_consensus_roles::attester::PublicKey`.
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct AttesterPublicKey(pub String);
+
+/// `zksync_consensus_crypto::TextFmt` representation of `zksync_consensus_roles::attester::SecretKey`.
+#[derive(Debug, Clone)]
+pub struct AttesterSecretKey(pub Secret<String>);
+
 /// `zksync_consensus_crypto::TextFmt` representation of `zksync_consensus_roles::node::PublicKey`.
 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct NodePublicKey(pub String);
@@ -25,6 +34,12 @@ impl PartialEq for ValidatorSecretKey {
     }
 }
 
+impl PartialEq for AttesterSecretKey {
+    fn eq(&self, other: &Self) -> bool {
+        self.0.expose_secret().eq(other.0.expose_secret())
+    }
+}
+
 impl PartialEq for NodeSecretKey {
     fn eq(&self, other: &Self) -> bool {
         self.0.expose_secret().eq(other.0.expose_secret())
@@ -40,6 +55,15 @@ pub struct WeightedValidator {
     pub weight: u64,
 }
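The `WeightedAttester` entries added below carry explicit weights, so committee checks reduce to weight arithmetic. A hypothetical helper illustrating the intended use (the 2/3 threshold is an assumption here; the real rule lives in `zksync_consensus_roles`):

```rust
// Sketch: signed weight must exceed 2/3 of the committee's total weight.
fn has_quorum(attesters: &[WeightedAttester], signed_weight: u64) -> bool {
    let total: u64 = attesters.iter().map(|a| a.weight).sum();
    signed_weight * 3 > total * 2
}
```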
+/// Copy-paste of `zksync_consensus_roles::attester::WeightedAttester`.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct WeightedAttester {
+    /// Attester key
+    pub key: AttesterPublicKey,
+    /// Attester weight inside the Committee.
+    pub weight: u64,
+}
+
 /// Copy-paste of `zksync_concurrency::net::Host`.
 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct Host(pub String);
@@ -60,11 +84,29 @@ pub struct GenesisSpec {
     pub protocol_version: ProtocolVersion,
     /// The validator committee. Represents `zksync_consensus_roles::validator::Committee`.
     pub validators: Vec<WeightedValidator>,
+    /// The attester committee. Represents `zksync_consensus_roles::attester::Committee`.
+    pub attesters: Vec<WeightedAttester>,
     /// Leader of the committee. Represents
     /// `zksync_consensus_roles::validator::LeaderSelectionMode::Sticky`.
     pub leader: ValidatorPublicKey,
 }
 
+#[derive(Clone, Debug, PartialEq, Default)]
+pub struct RpcConfig {
+    /// Max number of blocks that can be sent from/to each peer.
+    /// Defaults to 10 blocks/s/connection.
+    pub get_block_rate: Option<limiter::Rate>,
+}
+
+impl RpcConfig {
+    pub fn get_block_rate(&self) -> limiter::Rate {
+        self.get_block_rate.unwrap_or(limiter::Rate {
+            burst: 10,
+            refresh: time::Duration::milliseconds(100),
+        })
+    }
+}
+
 /// Config (shared between main node and external node).
 #[derive(Clone, Debug, PartialEq)]
 pub struct ConsensusConfig {
@@ -78,6 +120,13 @@ pub struct ConsensusConfig {
     /// Maximal allowed size of the payload in bytes.
     pub max_payload_size: usize,
 
+    /// Maximal allowed size of the sync-batch payloads in bytes.
+    ///
+    /// The batch consists of block payloads and a Merkle proof of inclusion on L1 (~1kB),
+    /// so the maximum batch size should be the maximum payload size times the maximum number
+    /// of blocks in a batch.
+    pub max_batch_size: usize,
+
     /// Limit on the number of inbound connections outside
     /// of the `static_inbound` set.
     pub gossip_dynamic_inbound_limit: usize,
@@ -91,11 +140,21 @@
     /// Used to (re)initialize genesis if needed.
     /// External nodes fetch the genesis from the main node.
     pub genesis_spec: Option<GenesisSpec>,
+
+    /// Rate limiting configuration for the p2p RPCs.
+    pub rpc: Option<RpcConfig>,
+}
+
+impl ConsensusConfig {
+    pub fn rpc(&self) -> RpcConfig {
+        self.rpc.clone().unwrap_or_default()
+    }
 }
 
-/// Secrets need for consensus.
+/// Secrets needed for consensus.
 #[derive(Debug, Clone, PartialEq)]
 pub struct ConsensusSecrets {
     pub validator_key: Option<ValidatorSecretKey>,
+    pub attester_key: Option<AttesterSecretKey>,
     pub node_key: Option<NodeSecretKey>,
 }
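The default `get_block_rate` above is a token bucket: a burst of 10 with one token refreshed every 100 ms, i.e. roughly the documented 10 blocks/s per connection. An illustrative assertion of that default:

```rust
fn default_rate_is_about_ten_per_second() {
    // `RpcConfig` derives `Default`, so `get_block_rate` is `None` and the fallback applies.
    let rate = RpcConfig::default().get_block_rate();
    assert_eq!(rate.burst, 10);
    assert_eq!(rate.refresh, time::Duration::milliseconds(100));
}
```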
diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs
index f9bfcc7696b1..b68720ebaefe 100644
--- a/core/lib/config/src/configs/contracts.rs
+++ b/core/lib/config/src/configs/contracts.rs
@@ -37,7 +37,9 @@ pub struct ContractsConfig {
     pub l2_testnet_paymaster_addr: Option<Address>,
     pub l1_multicall3_addr: Address,
     pub ecosystem_contracts: Option<EcosystemContracts>,
+    // Used by the RPC API and by the node builder in wiring the BaseTokenRatioProvider layer.
     pub base_token_addr: Option<Address>,
+    pub chain_admin_addr: Option<Address>
, } impl ContractsConfig { @@ -58,6 +60,7 @@ impl ContractsConfig { governance_addr: Address::repeat_byte(0x13), base_token_addr: Some(Address::repeat_byte(0x14)), ecosystem_contracts: Some(EcosystemContracts::for_tests()), + chain_admin_addr: Some(Address::repeat_byte(0x18)), } } } diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs new file mode 100644 index 000000000000..303a2c0b54c1 --- /dev/null +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -0,0 +1,43 @@ +use std::time::Duration; + +use serde::Deserialize; + +pub const DEFAULT_POLLING_INTERVAL_MS: u32 = 5000; +pub const DEFAULT_MAX_ROWS_TO_DISPATCH: u32 = 100; +pub const DEFAULT_MAX_RETRIES: u16 = 5; + +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct DADispatcherConfig { + /// The interval between the `da_dispatcher's` iterations. + pub polling_interval_ms: Option, + /// The maximum number of rows to query from the database in a single query. + pub max_rows_to_dispatch: Option, + /// The maximum number of retries for the dispatch of a blob. + pub max_retries: Option, +} + +impl DADispatcherConfig { + pub fn for_tests() -> Self { + Self { + polling_interval_ms: Some(DEFAULT_POLLING_INTERVAL_MS), + max_rows_to_dispatch: Some(DEFAULT_MAX_ROWS_TO_DISPATCH), + max_retries: Some(DEFAULT_MAX_RETRIES), + } + } + + pub fn polling_interval(&self) -> Duration { + match self.polling_interval_ms { + Some(interval) => Duration::from_millis(interval as u64), + None => Duration::from_millis(DEFAULT_POLLING_INTERVAL_MS as u64), + } + } + + pub fn max_rows_to_dispatch(&self) -> u32 { + self.max_rows_to_dispatch + .unwrap_or(DEFAULT_MAX_ROWS_TO_DISPATCH) + } + + pub fn max_retries(&self) -> u16 { + self.max_retries.unwrap_or(DEFAULT_MAX_RETRIES) + } +} diff --git a/core/lib/config/src/configs/en_config.rs b/core/lib/config/src/configs/en_config.rs new file mode 100644 index 000000000000..32dc5b7c7b49 --- /dev/null +++ b/core/lib/config/src/configs/en_config.rs @@ -0,0 +1,19 @@ +use std::num::NonZeroUsize; + +use serde::Deserialize; +use zksync_basic_types::{ + commitment::L1BatchCommitmentMode, url::SensitiveUrl, L1ChainId, L2ChainId, +}; + +/// Temporary config for initializing external node, will be completely replaced by consensus config later +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct ENConfig { + // Genesis + pub l2_chain_id: L2ChainId, + pub l1_chain_id: L1ChainId, + pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, + + // Main node configuration + pub main_node_url: SensitiveUrl, + pub main_node_rate_limit_rps: Option, +} diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 58b81fa0a145..c0e14dd68a87 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -81,6 +81,7 @@ pub enum PubdataSendingMode { #[default] Calldata, Blobs, + Custom, } #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -114,7 +115,7 @@ pub struct SenderConfig { // Max acceptable fee for sending tx it acts as a safeguard to prevent sending tx with very high fees. pub max_acceptable_priority_fee_in_gwei: u64, - /// The mode in which we send pubdata, either Calldata or Blobs + /// The mode in which we send pubdata: Calldata, Blobs or Custom (DA layers, Object Store, etc.) 
pub pubdata_sending_mode: PubdataSendingMode, } @@ -152,7 +153,7 @@ impl SenderConfig { } } -#[derive(Debug, Deserialize, Copy, Clone, PartialEq)] +#[derive(Debug, Deserialize, Copy, Clone, PartialEq, Default)] pub struct GasAdjusterConfig { /// Priority Fee to be used by GasAdjuster pub default_priority_fee_per_gas: u64, diff --git a/core/lib/config/src/configs/experimental.rs b/core/lib/config/src/configs/experimental.rs index ad0ef5a4d5b8..e362715d3d4a 100644 --- a/core/lib/config/src/configs/experimental.rs +++ b/core/lib/config/src/configs/experimental.rs @@ -12,6 +12,21 @@ pub struct ExperimentalDBConfig { /// Maximum number of files concurrently opened by state keeper cache RocksDB. Useful to fit into OS limits; can be used /// as a rudimentary way to control RAM usage of the cache. pub state_keeper_db_max_open_files: Option, + /// Configures whether to persist protective reads when persisting L1 batches in the state keeper. + /// Protective reads are never required by full nodes so far, not until such a node runs a full Merkle tree + /// (presumably, to participate in L1 batch proving). + /// By default, set to `true` as a temporary safety measure. + #[serde(default = "ExperimentalDBConfig::default_protective_reads_persistence_enabled")] + pub protective_reads_persistence_enabled: bool, + // Merkle tree config + /// Processing delay between processing L1 batches in the Merkle tree. + #[serde(default = "ExperimentalDBConfig::default_merkle_tree_processing_delay_ms")] + pub processing_delay_ms: u64, + /// If specified, RocksDB indices and Bloom filters will be managed by the block cache, rather than + /// being loaded entirely into RAM on the RocksDB initialization. The block cache capacity should be increased + /// correspondingly; otherwise, RocksDB performance can significantly degrade. + #[serde(default)] + pub include_indices_and_filters_in_block_cache: bool, } impl Default for ExperimentalDBConfig { @@ -20,6 +35,10 @@ impl Default for ExperimentalDBConfig { state_keeper_db_block_cache_capacity_mb: Self::default_state_keeper_db_block_cache_capacity_mb(), state_keeper_db_max_open_files: None, + protective_reads_persistence_enabled: + Self::default_protective_reads_persistence_enabled(), + processing_delay_ms: Self::default_merkle_tree_processing_delay_ms(), + include_indices_and_filters_in_block_cache: false, } } } @@ -32,4 +51,12 @@ impl ExperimentalDBConfig { pub fn state_keeper_db_block_cache_capacity(&self) -> usize { self.state_keeper_db_block_cache_capacity_mb * super::BYTES_IN_MEGABYTE } + + const fn default_protective_reads_persistence_enabled() -> bool { + true + } + + const fn default_merkle_tree_processing_delay_ms() -> u64 { + 100 + } } diff --git a/core/lib/config/src/configs/external_price_api_client.rs b/core/lib/config/src/configs/external_price_api_client.rs new file mode 100644 index 000000000000..06282eb8bebd --- /dev/null +++ b/core/lib/config/src/configs/external_price_api_client.rs @@ -0,0 +1,27 @@ +use std::time::Duration; + +use serde::Deserialize; + +pub const DEFAULT_TIMEOUT_MS: u64 = 10_000; + +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct ExternalPriceApiClientConfig { + pub source: String, + pub base_url: Option, + pub api_key: Option, + #[serde(default = "ExternalPriceApiClientConfig::default_timeout")] + pub client_timeout_ms: u64, + /// Forced conversion ratio. Only used with the ForcedPriceClient. 
+ pub forced_numerator: Option, + pub forced_denominator: Option, +} + +impl ExternalPriceApiClientConfig { + fn default_timeout() -> u64 { + DEFAULT_TIMEOUT_MS + } + + pub fn client_timeout(&self) -> Duration { + Duration::from_millis(self.client_timeout_ms) + } +} diff --git a/core/lib/config/src/configs/fri_prover.rs b/core/lib/config/src/configs/fri_prover.rs index 99e3d354536e..5cd25450531a 100644 --- a/core/lib/config/src/configs/fri_prover.rs +++ b/core/lib/config/src/configs/fri_prover.rs @@ -10,6 +10,18 @@ pub enum SetupLoadMode { FromMemory, } +/// Kind of cloud environment prover subsystem runs in. +/// +/// Currently will only affect how the prover zone is chosen. +#[derive(Debug, Default, Deserialize, Clone, Copy, PartialEq, Eq)] +pub enum CloudType { + /// Assumes that the prover runs in GCP. + #[default] + GCP, + /// Assumes that the prover runs locally. + Local, +} + /// Configuration for the fri prover application #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct FriProverConfig { @@ -28,6 +40,8 @@ pub struct FriProverConfig { pub shall_save_to_public_bucket: bool, pub prover_object_store: Option, pub public_object_store: Option, + #[serde(default)] + pub cloud_type: CloudType, } impl FriProverConfig { diff --git a/core/lib/config/src/configs/fri_witness_generator.rs b/core/lib/config/src/configs/fri_witness_generator.rs index c69d04367cf8..281159271dd0 100644 --- a/core/lib/config/src/configs/fri_witness_generator.rs +++ b/core/lib/config/src/configs/fri_witness_generator.rs @@ -23,6 +23,8 @@ pub struct FriWitnessGeneratorConfig { // whether to write to public GCS bucket for https://github.com/matter-labs/era-boojum-validator-cli pub shall_save_to_public_bucket: bool, + + pub prometheus_listener_port: Option, } #[derive(Debug)] diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs index 9f249d655f57..122d1e278553 100644 --- a/core/lib/config/src/configs/general.rs +++ b/core/lib/config/src/configs/general.rs @@ -1,18 +1,24 @@ use crate::{ configs::{ + base_token_adjuster::BaseTokenAdjusterConfig, chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, + consensus::ConsensusConfig, + da_dispatcher::DADispatcherConfig, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - vm_runner::ProtectiveReadsWriterConfig, - FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, - FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig, - PrometheusConfig, ProofDataHandlerConfig, + pruning::PruningConfig, + snapshot_recovery::SnapshotRecoveryConfig, + vm_runner::{BasicWitnessInputProducerConfig, ProtectiveReadsWriterConfig}, + CommitmentGeneratorConfig, ExternalPriceApiClientConfig, FriProofCompressorConfig, + FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, + FriWitnessVectorGeneratorConfig, ObservabilityConfig, PrometheusConfig, + ProofDataHandlerConfig, }, ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub struct GeneralConfig { pub postgres_config: Option, pub api_config: Option, @@ -34,6 +40,14 @@ pub struct GeneralConfig { pub eth: Option, pub snapshot_creator: Option, pub observability: Option, + pub da_dispatcher_config: Option, pub protective_reads_writer_config: Option, + pub basic_witness_input_producer_config: Option, + pub commitment_generator: Option, + pub snapshot_recovery: Option, + pub pruning: 
Option<PruningConfig>,
     pub core_object_store: Option<ObjectStoreConfig>,
+    pub base_token_adjuster: Option<BaseTokenAdjusterConfig>,
+    pub external_price_api_client_config: Option<ExternalPriceApiClientConfig>,
+    pub consensus_config: Option<ConsensusConfig>,
 }
diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs
index b2d9571ad292..0da6f986f353 100644
--- a/core/lib/config/src/configs/mod.rs
+++ b/core/lib/config/src/configs/mod.rs
@@ -1,12 +1,16 @@
 // Public re-exports
 pub use self::{
     api::ApiConfig,
+    base_token_adjuster::BaseTokenAdjusterConfig,
+    commitment_generator::CommitmentGeneratorConfig,
     contract_verifier::ContractVerifierConfig,
     contracts::{ContractsConfig, EcosystemContracts},
+    da_dispatcher::DADispatcherConfig,
     database::{DBConfig, PostgresConfig},
     eth_sender::{EthConfig, GasAdjusterConfig},
     eth_watch::EthWatchConfig,
     experimental::ExperimentalDBConfig,
+    external_price_api_client::ExternalPriceApiClientConfig,
     fri_proof_compressor::FriProofCompressorConfig,
     fri_prover::FriProverConfig,
     fri_prover_gateway::FriProverGatewayConfig,
@@ -17,21 +21,28 @@
     object_store::ObjectStoreConfig,
     observability::{ObservabilityConfig, OpentelemetryConfig},
     proof_data_handler::ProofDataHandlerConfig,
+    pruning::PruningConfig,
     secrets::{DatabaseSecrets, L1Secrets, Secrets},
+    snapshot_recovery::SnapshotRecoveryConfig,
     snapshots_creator::SnapshotsCreatorConfig,
     utils::PrometheusConfig,
-    vm_runner::ProtectiveReadsWriterConfig,
+    vm_runner::{BasicWitnessInputProducerConfig, ProtectiveReadsWriterConfig},
 };
 
 pub mod api;
+pub mod base_token_adjuster;
 pub mod chain;
+mod commitment_generator;
 pub mod consensus;
 pub mod contract_verifier;
 pub mod contracts;
+pub mod da_dispatcher;
 pub mod database;
+pub mod en_config;
 pub mod eth_sender;
 pub mod eth_watch;
 mod experimental;
+pub mod external_price_api_client;
 pub mod fri_proof_compressor;
 pub mod fri_prover;
 pub mod fri_prover_gateway;
@@ -44,7 +55,9 @@ pub mod house_keeper;
 pub mod object_store;
 pub mod observability;
 pub mod proof_data_handler;
+pub mod pruning;
 pub mod secrets;
+pub mod snapshot_recovery;
 pub mod snapshots_creator;
 pub mod utils;
 pub mod vm_runner;
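The pruning knobs introduced below are all optional seconds-valued fields; callers are expected to fall back to the documented defaults. A hypothetical resolution helper using the 60 s and 1 h fallbacks taken from the doc comments:

```rust
use std::time::Duration;

fn removal_delay(config: &PruningConfig) -> Duration {
    // Doc comment below: "The default value is 60 seconds."
    Duration::from_secs(config.removal_delay_sec.map_or(60, |delay| delay.get()))
}

fn data_retention(config: &PruningConfig) -> Duration {
    // Doc comment below: "The default value is 1 hour."
    Duration::from_secs(config.data_retention_sec.unwrap_or(3_600))
}
```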
diff --git a/core/lib/config/src/configs/pruning.rs b/core/lib/config/src/configs/pruning.rs
new file mode 100644
index 000000000000..d2a5b0e5e9df
--- /dev/null
+++ b/core/lib/config/src/configs/pruning.rs
@@ -0,0 +1,19 @@
+use std::num::NonZeroU64;
+
+use serde::Deserialize;
+
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+pub struct PruningConfig {
+    pub enabled: bool,
+    /// Number of L1 batches pruned at a time.
+    pub chunk_size: Option<u32>,
+    /// Delta between soft- and hard-removing data from Postgres. Should be reasonably large (order of 60 seconds).
+    /// The default value is 60 seconds.
+    pub removal_delay_sec: Option<NonZeroU64>,
+    /// If set, L1 batches will be pruned after the batch timestamp is this old (in seconds). Note that an L1 batch
+    /// may be temporarily retained for other reasons; e.g., a batch cannot be pruned until it is executed on L1,
+    /// which happens roughly 24 hours after its generation on the mainnet. Thus, in practice this value can specify
+    /// the retention period greater than that implicitly imposed by other criteria (e.g., 7 or 30 days).
+    /// If set to 0, L1 batches will not be retained based on their timestamp. The default value is 1 hour.
+    pub data_retention_sec: Option<u64>,
+}
diff --git a/core/lib/config/src/configs/snapshot_recovery.rs b/core/lib/config/src/configs/snapshot_recovery.rs
new file mode 100644
index 000000000000..c1d5ea6e3ac6
--- /dev/null
+++ b/core/lib/config/src/configs/snapshot_recovery.rs
@@ -0,0 +1,48 @@
+use std::num::NonZeroUsize;
+
+use serde::Deserialize;
+use zksync_basic_types::L1BatchNumber;
+
+use crate::ObjectStoreConfig;
+
+#[derive(Debug, Clone, PartialEq, Deserialize, Default)]
+pub struct TreeRecoveryConfig {
+    /// Approximate chunk size (measured in the number of entries) to recover in a single iteration.
+    /// Reasonable values are order of 100,000 (meaning an iteration takes several seconds).
+    ///
+    /// **Important.** This value cannot be changed in the middle of tree recovery (i.e., if a node is stopped in the middle
+    /// of recovery and then restarted with a different config).
+    pub chunk_size: Option<u64>,
+    /// Buffer capacity for parallel persistence operations. Should be reasonably small since larger buffer means more RAM usage;
+    /// buffer elements are persisted tree chunks. OTOH, small buffer can lead to persistence parallelization being inefficient.
+    ///
+    /// If not set, parallel persistence will be disabled.
+    pub parallel_persistence_buffer: Option<NonZeroUsize>,
+}
+
+#[derive(Debug, Clone, PartialEq, Deserialize, Default)]
+pub struct PostgresRecoveryConfig {
+    /// Maximum concurrency factor for the concurrent parts of snapshot recovery for Postgres. It may be useful to
+    /// reduce this factor to about 5 if snapshot recovery overloads I/O capacity of the node. Conversely,
+    /// if I/O capacity of your infra is high, you may increase concurrency to speed up Postgres recovery.
+    pub max_concurrency: Option<NonZeroUsize>,
+}
+
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+pub struct SnapshotRecoveryConfig {
+    /// Enables application-level snapshot recovery. Required to start a node that was recovered from a snapshot,
+    /// or to initialize a node from a snapshot. Has no effect if the node was initialized from a Postgres dump
+    /// or was synced from genesis.
+    ///
+    /// This is an experimental and incomplete feature; do not use unless you know what you're doing.
+    pub enabled: bool,
+    /// L1 batch number of the snapshot to use during recovery. Specifying this parameter is mostly useful for testing.
+    pub l1_batch: Option<L1BatchNumber>,
+    /// Enables dropping storage key preimages when recovering storage logs from a snapshot with version 0.
+    /// This is a temporary flag that will eventually be removed together with version 0 snapshot support.
+    #[serde(default)]
+    pub drop_storage_key_preimages: bool,
+    pub tree: TreeRecoveryConfig,
+    pub postgres: PostgresRecoveryConfig,
+    pub object_store: Option<ObjectStoreConfig>,
+}
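To get a feel for the `chunk_size` knob above: with the suggested ~100,000 entries per chunk, a tree with 400M entries is recovered in about 4,000 iterations, each taking several seconds. A back-of-the-envelope sketch:

```rust
fn recovery_iterations(total_entries: u64, chunk_size: u64) -> u64 {
    total_entries.div_ceil(chunk_size) // `u64::div_ceil` is stable since Rust 1.73
}

fn main() {
    assert_eq!(recovery_iterations(400_000_000, 100_000), 4_000);
}
```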
diff --git a/core/lib/config/src/configs/snapshots_creator.rs b/core/lib/config/src/configs/snapshots_creator.rs
index 7d297f607803..c7dc39e41ef5 100644
--- a/core/lib/config/src/configs/snapshots_creator.rs
+++ b/core/lib/config/src/configs/snapshots_creator.rs
@@ -1,21 +1,34 @@
 use serde::Deserialize;
+use zksync_basic_types::L1BatchNumber;
 
 use crate::ObjectStoreConfig;
 
 #[derive(Debug, Clone, PartialEq, Deserialize)]
 pub struct SnapshotsCreatorConfig {
-    #[serde(default = "snapshots_creator_storage_logs_chunk_size_default")]
+    /// Version of snapshots to create.
+    // Raw integer version is used because `SnapshotVersion` is defined in `zksync_types` crate.
+    #[serde(default)]
+    pub version: u16,
+    /// L1 batch number to create the snapshot for. If not specified, a snapshot will be created
+    /// for the current penultimate L1 batch.
+    ///
+    /// - If a snapshot with this L1 batch already exists and is complete, the creator will do nothing.
+    /// - If a snapshot with this L1 batch exists and is incomplete, the creator will continue creating it,
+    ///   regardless of whether the specified snapshot `version` matches.
+    pub l1_batch_number: Option<L1BatchNumber>,
+    #[serde(default = "SnapshotsCreatorConfig::storage_logs_chunk_size_default")]
     pub storage_logs_chunk_size: u64,
-
-    #[serde(default = "snapshots_creator_concurrent_queries_count")]
+    #[serde(default = "SnapshotsCreatorConfig::concurrent_queries_count")]
     pub concurrent_queries_count: u32,
     pub object_store: Option<ObjectStoreConfig>,
 }
 
-fn snapshots_creator_storage_logs_chunk_size_default() -> u64 {
-    1_000_000
-}
+impl SnapshotsCreatorConfig {
+    const fn storage_logs_chunk_size_default() -> u64 {
+        1_000_000
+    }
 
-fn snapshots_creator_concurrent_queries_count() -> u32 {
-    25
+    const fn concurrent_queries_count() -> u32 {
+        25
+    }
 }
diff --git a/core/lib/config/src/configs/utils.rs b/core/lib/config/src/configs/utils.rs
index 977a48e82d20..23cd0d6dd740 100644
--- a/core/lib/config/src/configs/utils.rs
+++ b/core/lib/config/src/configs/utils.rs
@@ -7,7 +7,7 @@ pub struct PrometheusConfig {
     /// Port to which the Prometheus exporter server is listening.
     pub listener_port: u16,
     /// URL of the push gateway.
-    pub pushgateway_url: String,
+    pub pushgateway_url: Option<String>,
     /// Push interval in ms.
     pub push_interval_ms: Option<u64>,
 }
@@ -18,12 +18,16 @@ impl PrometheusConfig {
     }
 
     /// Returns the full endpoint URL for the push gateway.
-    pub fn gateway_endpoint(&self) -> String {
-        let gateway_url = &self.pushgateway_url;
+    pub fn gateway_endpoint(&self) -> Option<String> {
+        let Some(gateway_url) = &self.pushgateway_url else {
+            return None;
+        };
         let job_id = "zksync-pushgateway";
         let namespace =
             env::var("POD_NAMESPACE").unwrap_or_else(|_| "UNKNOWN_NAMESPACE".to_owned());
         let pod = env::var("POD_NAME").unwrap_or_else(|_| "UNKNOWN_POD".to_owned());
-        format!("{gateway_url}/metrics/job/{job_id}/namespace/{namespace}/pod/{pod}")
+        Some(format!(
+            "{gateway_url}/metrics/job/{job_id}/namespace/{namespace}/pod/{pod}"
+        ))
     }
 }
diff --git a/core/lib/config/src/configs/vm_runner.rs b/core/lib/config/src/configs/vm_runner.rs
index eb3d4a9d4b24..fa7c7c1a90a3 100644
--- a/core/lib/config/src/configs/vm_runner.rs
+++ b/core/lib/config/src/configs/vm_runner.rs
@@ -17,3 +17,20 @@ impl ProtectiveReadsWriterConfig {
         "./db/protective_reads_writer".to_owned()
     }
 }
+
+#[derive(Debug, Deserialize, Clone, PartialEq, Default)]
+pub struct BasicWitnessInputProducerConfig {
+    /// Path to the RocksDB data directory that serves state cache.
+    #[serde(default = "BasicWitnessInputProducerConfig::default_db_path")]
+    pub db_path: String,
+    /// Maximum number of batches to process at the same time.
+    pub window_size: u32,
+    /// All batches before this one (inclusive) are always considered to be processed.
+ pub first_processed_batch: L1BatchNumber, +} + +impl BasicWitnessInputProducerConfig { + fn default_db_path() -> String { + "./db/basic_witness_input_producer".to_owned() + } +} diff --git a/core/lib/config/src/configs/wallets.rs b/core/lib/config/src/configs/wallets.rs index 678adb674f14..7b74cd441166 100644 --- a/core/lib/config/src/configs/wallets.rs +++ b/core/lib/config/src/configs/wallets.rs @@ -1,7 +1,7 @@ use zksync_basic_types::{Address, H160, H256}; use zksync_crypto_primitives::K256PrivateKey; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct AddressWallet { address: Address, } @@ -16,7 +16,7 @@ impl AddressWallet { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct Wallet { address: Address, private_key: K256PrivateKey, @@ -58,18 +58,18 @@ impl Wallet { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct EthSender { pub operator: Wallet, pub blob_operator: Option, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct StateKeeper { pub fee_account: AddressWallet, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct Wallets { pub eth_sender: Option, pub state_keeper: Option, diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index 66656e60b702..91b5c6d480e3 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -1,8 +1,9 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] pub use crate::configs::{ - ApiConfig, ContractVerifierConfig, ContractsConfig, DBConfig, EthConfig, EthWatchConfig, - GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, + ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, ContractsConfig, + DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, GenesisConfig, + ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; pub mod configs; diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 3feee2a29ec7..f3d6b98491be 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -6,9 +6,10 @@ use zksync_basic_types::{ commitment::L1BatchCommitmentMode, network::Network, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, - L1ChainId, L2ChainId, + L1BatchNumber, L1ChainId, L2ChainId, }; use zksync_consensus_utils::EncodeDist; +use zksync_crypto_primitives::K256PrivateKey; use crate::configs::{self, eth_sender::PubdataSendingMode}; @@ -97,6 +98,9 @@ impl Distribution for EncodeDist { mempool_cache_update_interval: self.sample(rng), mempool_cache_size: self.sample(rng), whitelisted_tokens_for_aa: self.sample_range(rng).map(|_| rng.gen()).collect(), + api_namespaces: self + .sample_opt(|| self.sample_range(rng).map(|_| self.sample(rng)).collect()), + extended_api_tracing: self.sample(rng), } } } @@ -175,6 +179,7 @@ impl Distribution for EncodeDist { validation_computational_gas_limit: self.sample(rng), save_call_traces: self.sample(rng), max_circuits_per_batch: self.sample(rng), + protective_reads_persistence_enabled: self.sample(rng), // These values are not involved into files serialization skip them fee_account_addr: None, bootloader_hash: None, @@ -230,23 +235,24 @@ impl Distribution for EncodeDist { } impl Distribution for EncodeDist { - fn sample(&self, g: &mut R) -> configs::ContractsConfig { + fn sample(&self, rng: &mut R) -> configs::ContractsConfig { configs::ContractsConfig { - governance_addr: g.gen(), - verifier_addr: 
g.gen(),
-            default_upgrade_addr: g.gen(),
-            diamond_proxy_addr: g.gen(),
-            validator_timelock_addr: g.gen(),
-            l1_erc20_bridge_proxy_addr: g.gen(),
-            l2_erc20_bridge_addr: g.gen(),
-            l1_shared_bridge_proxy_addr: g.gen(),
-            l2_shared_bridge_addr: g.gen(),
-            l1_weth_bridge_proxy_addr: g.gen(),
-            l2_weth_bridge_addr: g.gen(),
-            l2_testnet_paymaster_addr: g.gen(),
-            l1_multicall3_addr: g.gen(),
-            base_token_addr: g.gen(),
-            ecosystem_contracts: self.sample(g),
+            governance_addr: rng.gen(),
+            verifier_addr: rng.gen(),
+            default_upgrade_addr: rng.gen(),
+            diamond_proxy_addr: rng.gen(),
+            validator_timelock_addr: rng.gen(),
+            l1_erc20_bridge_proxy_addr: rng.gen(),
+            l2_erc20_bridge_addr: rng.gen(),
+            l1_shared_bridge_proxy_addr: rng.gen(),
+            l2_shared_bridge_addr: rng.gen(),
+            l1_weth_bridge_proxy_addr: rng.gen(),
+            l2_weth_bridge_addr: rng.gen(),
+            l2_testnet_paymaster_addr: rng.gen(),
+            l1_multicall3_addr: rng.gen(),
+            base_token_addr: rng.gen(),
+            chain_admin_addr: rng.gen(),
+            ecosystem_contracts: self.sample(rng),
         }
     }
 }
@@ -280,6 +286,9 @@ impl Distribution<configs::ExperimentalDBConfig> for EncodeDist {
         configs::ExperimentalDBConfig {
             state_keeper_db_block_cache_capacity_mb: self.sample(rng),
             state_keeper_db_max_open_files: self.sample(rng),
+            protective_reads_persistence_enabled: self.sample(rng),
+            processing_delay_ms: self.sample(rng),
+            include_indices_and_filters_in_block_cache: self.sample(rng),
         }
     }
 }
@@ -343,9 +352,10 @@ impl Distribution<configs::eth_sender::PubdataSendingMode> for EncodeDist {
     fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::eth_sender::PubdataSendingMode {
         type T = configs::eth_sender::PubdataSendingMode;
-        match rng.gen_range(0..2) {
+        match rng.gen_range(0..3) {
             0 => T::Calldata,
-            _ => T::Blobs,
+            1 => T::Blobs,
+            _ => T::Custom,
         }
     }
 }
@@ -428,6 +438,16 @@ impl Distribution for EncodeDist {
     }
 }
 
+impl Distribution<configs::fri_prover::CloudType> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::fri_prover::CloudType {
+        type T = configs::fri_prover::CloudType;
+        match rng.gen_range(0..1) {
+            0 => T::GCP,
+            _ => T::Local,
+        }
+    }
+}
+
 impl Distribution<configs::FriProverConfig> for EncodeDist {
     fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::FriProverConfig {
         configs::FriProverConfig {
@@ -444,6 +464,7 @@ impl Distribution<configs::FriProverConfig> for EncodeDist {
             availability_check_interval_in_secs: self.sample(rng),
             prover_object_store: self.sample(rng),
             public_object_store: self.sample(rng),
+            cloud_type: self.sample(rng),
         }
     }
 }
@@ -551,6 +572,7 @@ impl Distribution<configs::FriWitnessGeneratorConfig> for EncodeDist {
             max_attempts: self.sample(rng),
             last_l1_batch_to_process: self.sample(rng),
             shall_save_to_public_bucket: self.sample(rng),
+            prometheus_listener_port: self.sample(rng),
         }
     }
 }
@@ -634,6 +656,8 @@ impl Distribution for EncodeDist {
 impl Distribution<configs::SnapshotsCreatorConfig> for EncodeDist {
     fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::SnapshotsCreatorConfig {
         configs::SnapshotsCreatorConfig {
+            l1_batch_number: self.sample_opt(|| L1BatchNumber(rng.gen())),
+            version: if rng.gen() { 0 } else { 1 },
             storage_logs_chunk_size: self.sample(rng),
             concurrent_queries_count: self.sample(rng),
             object_store: self.sample(rng),
@@ -672,11 +696,11 @@ impl Distribution<configs::GenesisConfig> for EncodeDist {
                     .unwrap(),
                 patch: VersionPatch(rng.gen()),
             }),
-            genesis_root_hash: rng.gen(),
-            rollup_last_leaf_index: self.sample(rng),
-            genesis_commitment: rng.gen(),
-            bootloader_hash: rng.gen(),
-            default_aa_hash: rng.gen(),
+            genesis_root_hash: Some(rng.gen()),
+            rollup_last_leaf_index: Some(self.sample(rng)),
+            genesis_commitment: Some(rng.gen()),
+            bootloader_hash: Some(rng.gen()),
+            default_aa_hash: Some(rng.gen()),
             fee_account: rng.gen(),
             l1_chain_id: L1ChainId(self.sample(rng)),
             l2_chain_id: L2ChainId::default(),
@@ -713,6 +737,16 @@ impl Distribution for EncodeDist {
     }
 }
 
+impl Distribution<configs::consensus::WeightedAttester> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::consensus::WeightedAttester {
+        use configs::consensus::{AttesterPublicKey, WeightedAttester};
+        WeightedAttester {
+            key: AttesterPublicKey(self.sample(rng)),
+            weight: self.sample(rng),
+        }
+    }
+}
+
 impl Distribution<configs::consensus::GenesisSpec> for EncodeDist {
     fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::consensus::GenesisSpec {
         use configs::consensus::{GenesisSpec, ProtocolVersion, ValidatorPublicKey};
@@ -720,6 +754,7 @@ impl Distribution<configs::consensus::GenesisSpec> for EncodeDist {
             chain_id: L2ChainId::default(),
             protocol_version: ProtocolVersion(self.sample(rng)),
             validators: self.sample_collect(rng),
+            attesters: self.sample_collect(rng),
             leader: ValidatorPublicKey(self.sample(rng)),
         }
     }
@@ -732,6 +767,7 @@ impl Distribution<configs::consensus::ConsensusConfig> for EncodeDist {
             server_addr: self.sample(rng),
             public_addr: Host(self.sample(rng)),
             max_payload_size: self.sample(rng),
+            max_batch_size: self.sample(rng),
             gossip_dynamic_inbound_limit: self.sample(rng),
             gossip_static_inbound: self
                 .sample_range(rng)
@@ -742,15 +778,27 @@ impl Distribution<configs::consensus::ConsensusConfig> for EncodeDist {
                 .map(|_| (NodePublicKey(self.sample(rng)), Host(self.sample(rng))))
                 .collect(),
             genesis_spec: self.sample(rng),
+            rpc: self.sample(rng),
+        }
+    }
+}
+
+impl Distribution<configs::consensus::RpcConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::consensus::RpcConfig {
+        configs::consensus::RpcConfig {
+            get_block_rate: self.sample(rng),
         }
     }
 }
 
 impl Distribution<configs::consensus::ConsensusSecrets> for EncodeDist {
     fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::consensus::ConsensusSecrets {
-        use configs::consensus::{ConsensusSecrets, NodeSecretKey, ValidatorSecretKey};
+        use configs::consensus::{
+            AttesterSecretKey, ConsensusSecrets, NodeSecretKey, ValidatorSecretKey,
+        };
         ConsensusSecrets {
             validator_key: self.sample_opt(|| ValidatorSecretKey(String::into(self.sample(rng)))),
+            attester_key: self.sample_opt(|| AttesterSecretKey(String::into(self.sample(rng)))),
             node_key: self.sample_opt(|| NodeSecretKey(String::into(self.sample(rng)))),
         }
     }
@@ -786,3 +834,217 @@ impl Distribution for EncodeDist {
         }
     }
 }
+
+impl Distribution<configs::wallets::Wallet> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::wallets::Wallet {
+        configs::wallets::Wallet::new(K256PrivateKey::from_bytes(rng.gen()).unwrap())
+    }
+}
+
+impl Distribution<configs::wallets::AddressWallet> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::wallets::AddressWallet {
+        configs::wallets::AddressWallet::from_address(rng.gen())
+    }
+}
+
+impl Distribution<configs::wallets::StateKeeper> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::wallets::StateKeeper {
+        configs::wallets::StateKeeper {
+            fee_account: self.sample(rng),
+        }
+    }
+}
+
+impl Distribution<configs::wallets::EthSender> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::wallets::EthSender {
+        configs::wallets::EthSender {
+            operator: self.sample(rng),
+            blob_operator: self.sample_opt(|| self.sample(rng)),
+        }
+    }
+}
+
+impl Distribution<configs::wallets::Wallets> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::wallets::Wallets {
+        configs::wallets::Wallets {
+            state_keeper: self.sample_opt(|| self.sample(rng)),
+            eth_sender: self.sample_opt(|| self.sample(rng)),
+        }
+    }
+}
+
+impl Distribution<configs::en_config::ENConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::en_config::ENConfig {
+        configs::en_config::ENConfig {
+            l2_chain_id: L2ChainId::default(),
+            l1_chain_id: L1ChainId(rng.gen()),
+            main_node_url: format!("localhost:{}", rng.gen::<u16>()).parse().unwrap(),
+            l1_batch_commit_data_generator_mode: match rng.gen_range(0..2) {
+                0 => L1BatchCommitmentMode::Rollup,
+                _ => L1BatchCommitmentMode::Validium,
+            },
+            main_node_rate_limit_rps: self.sample_opt(|| rng.gen()),
+        }
+    }
+}
+
+impl Distribution<configs::da_dispatcher::DADispatcherConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::da_dispatcher::DADispatcherConfig {
+        configs::da_dispatcher::DADispatcherConfig {
+            polling_interval_ms: self.sample(rng),
+            max_rows_to_dispatch: self.sample(rng),
+            max_retries: self.sample(rng),
+        }
+    }
+}
+
+impl Distribution<configs::vm_runner::ProtectiveReadsWriterConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(
+        &self,
+        rng: &mut R,
+    ) -> configs::vm_runner::ProtectiveReadsWriterConfig {
+        configs::vm_runner::ProtectiveReadsWriterConfig {
+            db_path: self.sample(rng),
+            window_size: self.sample(rng),
+            first_processed_batch: L1BatchNumber(rng.gen()),
+        }
+    }
+}
+
+impl Distribution<configs::vm_runner::BasicWitnessInputProducerConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(
+        &self,
+        rng: &mut R,
+    ) -> configs::vm_runner::BasicWitnessInputProducerConfig {
+        configs::vm_runner::BasicWitnessInputProducerConfig {
+            db_path: self.sample(rng),
+            window_size: self.sample(rng),
+            first_processed_batch: L1BatchNumber(rng.gen()),
+        }
+    }
+}
+
+impl Distribution<configs::CommitmentGeneratorConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::CommitmentGeneratorConfig {
+        configs::CommitmentGeneratorConfig {
+            max_parallelism: self.sample(rng),
+        }
+    }
+}
+
+impl Distribution<configs::snapshot_recovery::TreeRecoveryConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(
+        &self,
+        rng: &mut R,
+    ) -> configs::snapshot_recovery::TreeRecoveryConfig {
+        configs::snapshot_recovery::TreeRecoveryConfig {
+            chunk_size: self.sample(rng),
+            parallel_persistence_buffer: self.sample_opt(|| rng.gen()),
+        }
+    }
+}
+
+impl Distribution<configs::snapshot_recovery::PostgresRecoveryConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(
+        &self,
+        rng: &mut R,
+    ) -> configs::snapshot_recovery::PostgresRecoveryConfig {
+        configs::snapshot_recovery::PostgresRecoveryConfig {
+            max_concurrency: self.sample_opt(|| rng.gen()),
+        }
+    }
+}
+
+impl Distribution<configs::snapshot_recovery::SnapshotRecoveryConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(
+        &self,
+        rng: &mut R,
+    ) -> configs::snapshot_recovery::SnapshotRecoveryConfig {
+        use configs::snapshot_recovery::{SnapshotRecoveryConfig, TreeRecoveryConfig};
+        let tree: TreeRecoveryConfig = self.sample(rng);
+        SnapshotRecoveryConfig {
+            enabled: self.sample(rng),
+            l1_batch: self.sample_opt(|| L1BatchNumber(rng.gen())),
+            drop_storage_key_preimages: (tree != TreeRecoveryConfig::default()) && self.sample(rng),
+            tree,
+            postgres: self.sample(rng),
+            object_store: self.sample(rng),
+        }
+    }
+}
+
+impl Distribution<configs::pruning::PruningConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::pruning::PruningConfig {
+        configs::pruning::PruningConfig {
+            enabled: self.sample(rng),
+            chunk_size: self.sample(rng),
+            removal_delay_sec: self.sample_opt(|| rng.gen()),
+            data_retention_sec: self.sample(rng),
+        }
+    }
+}
+
+impl Distribution<configs::base_token_adjuster::BaseTokenAdjusterConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(
+        &self,
+        rng: &mut R,
+    ) -> configs::base_token_adjuster::BaseTokenAdjusterConfig {
+        configs::base_token_adjuster::BaseTokenAdjusterConfig {
+            price_polling_interval_ms: self.sample(rng),
+            price_cache_update_interval_ms: self.sample(rng),
+        }
+    }
+}
+
+impl Distribution<configs::external_price_api_client::ExternalPriceApiClientConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(
+        &self,
+        rng: &mut R,
+    ) -> configs::external_price_api_client::ExternalPriceApiClientConfig {
+        configs::external_price_api_client::ExternalPriceApiClientConfig {
+            source: self.sample(rng),
+            base_url: self.sample(rng),
+            api_key: self.sample(rng),
+            client_timeout_ms: self.sample(rng),
+            forced_numerator: self.sample(rng),
+            forced_denominator: self.sample(rng),
+        }
+    }
+}
+
+impl Distribution<configs::GeneralConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::GeneralConfig {
+        configs::GeneralConfig {
+            postgres_config: 
self.sample(rng), + api_config: self.sample(rng), + contract_verifier: self.sample(rng), + circuit_breaker_config: self.sample(rng), + mempool_config: self.sample(rng), + operations_manager_config: self.sample(rng), + state_keeper_config: self.sample(rng), + house_keeper_config: self.sample(rng), + proof_compressor_config: self.sample(rng), + prover_config: self.sample(rng), + prover_gateway: self.sample(rng), + witness_vector_generator: self.sample(rng), + prover_group_config: self.sample(rng), + witness_generator: self.sample(rng), + prometheus_config: self.sample(rng), + proof_data_handler_config: self.sample(rng), + db_config: self.sample(rng), + eth: self.sample(rng), + snapshot_creator: self.sample(rng), + observability: self.sample(rng), + da_dispatcher_config: self.sample(rng), + protective_reads_writer_config: self.sample(rng), + basic_witness_input_producer_config: self.sample(rng), + commitment_generator: self.sample(rng), + snapshot_recovery: self.sample(rng), + pruning: self.sample(rng), + core_object_store: self.sample(rng), + base_token_adjuster: self.sample(rng), + external_price_api_client_config: self.sample(rng), + consensus_config: self.sample(rng), + } + } +} diff --git a/core/lib/constants/Cargo.toml b/core/lib/constants/Cargo.toml index 622ac46c3152..b741b5734902 100644 --- a/core/lib/constants/Cargo.toml +++ b/core/lib/constants/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_system_constants" -version = "0.1.0" +description = "ZKsync system constants" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/lib/contract_verifier/Cargo.toml b/core/lib/contract_verifier/Cargo.toml index ea84024cba98..2803e3bb4185 100644 --- a/core/lib/contract_verifier/Cargo.toml +++ b/core/lib/contract_verifier/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_contract_verifier_lib" -version = "0.1.0" +description = "ZKsync contract verification utilities" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/lib/contracts/Cargo.toml b/core/lib/contracts/Cargo.toml index eedf60b262ac..2b80295cf440 100644 --- a/core/lib/contracts/Cargo.toml +++ b/core/lib/contracts/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_contracts" -version = "0.1.0" +description = "Definitions of main ZKsync smart contracts" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index 3374631a1814..bd7fa80b716c 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -39,14 +39,15 @@ const STATE_TRANSITION_CONTRACT_FILE: (&str, &str) = ( "IStateTransitionManager.sol/IStateTransitionManager.json", ); const ZKSYNC_HYPERCHAIN_CONTRACT_FILE: (&str, &str) = ( - "state-transition/", - "chain-interfaces/IZkSyncHyperchain.sol/IZkSyncHyperchain.json", + "state-transition/chain-interfaces", + "IZkSyncHyperchain.sol/IZkSyncHyperchain.json", ); const DIAMOND_INIT_CONTRACT_FILE: (&str, &str) = ( "state-transition", "chain-interfaces/IDiamondInit.sol/IDiamondInit.json", ); const GOVERNANCE_CONTRACT_FILE: (&str, &str) = ("governance", "IGovernance.sol/IGovernance.json"); +const CHAIN_ADMIN_CONTRACT_FILE: (&str, &str) = ("governance", "IChainAdmin.sol/IChainAdmin.json"); const MULTICALL3_CONTRACT_FILE: (&str, &str) = ("dev-contracts", "Multicall3.sol/Multicall3.json"); const VERIFIER_CONTRACT_FILE: (&str, &str) = ("state-transition", 
"Verifier.sol/Verifier.json"); const _IERC20_CONTRACT_FILE: &str = @@ -128,6 +129,10 @@ pub fn governance_contract() -> Contract { load_contract_for_both_compilers(GOVERNANCE_CONTRACT_FILE) } +pub fn chain_admin_contract() -> Contract { + load_contract_for_both_compilers(CHAIN_ADMIN_CONTRACT_FILE) +} + pub fn state_transition_manager_contract() -> Contract { load_contract_for_both_compilers(STATE_TRANSITION_CONTRACT_FILE) } @@ -804,3 +809,60 @@ pub static ADMIN_UPGRADE_CHAIN_FROM_VERSION_FUNCTION: Lazy = Lazy::new }"#; serde_json::from_str(abi).unwrap() }); + +pub static DIAMOND_CUT: Lazy = Lazy::new(|| { + let abi = r#" + { + "inputs": [ + { + "components": [ + { + "components": [ + { + "internalType": "address", + "name": "facet", + "type": "address" + }, + { + "internalType": "enum Diamond.Action", + "name": "action", + "type": "uint8" + }, + { + "internalType": "bool", + "name": "isFreezable", + "type": "bool" + }, + { + "internalType": "bytes4[]", + "name": "selectors", + "type": "bytes4[]" + } + ], + "internalType": "struct Diamond.FacetCut[]", + "name": "facetCuts", + "type": "tuple[]" + }, + { + "internalType": "address", + "name": "initAddress", + "type": "address" + }, + { + "internalType": "bytes", + "name": "initCalldata", + "type": "bytes" + } + ], + "internalType": "struct Diamond.DiamondCutData", + "name": "_diamondCut", + "type": "tuple" + } + ], + "name": "diamondCut", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }"#; + serde_json::from_str(abi).unwrap() +}); diff --git a/core/lib/crypto/Cargo.toml b/core/lib/crypto/Cargo.toml deleted file mode 100644 index 5c81bd6b9d84..000000000000 --- a/core/lib/crypto/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "zksync_crypto" -version = "0.1.0" -edition.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -license.workspace = true -keywords.workspace = true -categories.workspace = true -readme = "README.md" - -[dependencies] -zksync_basic_types.workspace = true -serde.workspace = true -thiserror.workspace = true -once_cell.workspace = true -hex.workspace = true -sha2.workspace = true -blake2.workspace = true - -[dev-dependencies] -serde_json.workspace = true diff --git a/core/lib/crypto/README.md b/core/lib/crypto/README.md deleted file mode 100644 index 38b5a306a9bd..000000000000 --- a/core/lib/crypto/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# ZKsync crypto. Essential cryptography primitives for the ZKsync network - -`zksync_crypto` is a crate containing essential ZKsync cryptographic primitives, such as private keys and hashers. - -## License - -`zksync_crypto` is a part of ZKsync stack, which is distributed under the terms of both the MIT license and the Apache -License (Version 2.0). - -See [LICENSE-APACHE](../../../LICENSE-APACHE), [LICENSE-MIT](../../../LICENSE-MIT) for details. 
diff --git a/core/lib/crypto/src/lib.rs b/core/lib/crypto/src/lib.rs deleted file mode 100644 index f437e48ef7b3..000000000000 --- a/core/lib/crypto/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod hasher; diff --git a/core/lib/crypto_primitives/Cargo.toml b/core/lib/crypto_primitives/Cargo.toml index 1664c4c95bb5..7efe5279b598 100644 --- a/core/lib/crypto_primitives/Cargo.toml +++ b/core/lib/crypto_primitives/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "zksync_crypto_primitives" +description = "ZKsync core cryptographic primitives" version.workspace = true edition.workspace = true authors.workspace = true @@ -12,6 +13,8 @@ categories.workspace = true [dependencies] secp256k1 = { workspace = true, features = ["global-context"] } +sha2.workspace = true +blake2.workspace = true zksync_utils.workspace = true zksync_basic_types.workspace = true thiserror.workspace = true diff --git a/core/lib/crypto_primitives/src/ecdsa_signature.rs b/core/lib/crypto_primitives/src/ecdsa_signature.rs index 026e42307dc6..a994e0f3c134 100644 --- a/core/lib/crypto_primitives/src/ecdsa_signature.rs +++ b/core/lib/crypto_primitives/src/ecdsa_signature.rs @@ -43,7 +43,7 @@ type Public = H512; /// /// Provides a safe to use `Debug` implementation (outputting the address corresponding to the key). /// The key is zeroized on drop. -#[derive(Clone)] +#[derive(Clone, PartialEq)] pub struct K256PrivateKey(SecretKey); impl fmt::Debug for K256PrivateKey { diff --git a/core/lib/crypto/src/hasher/blake2.rs b/core/lib/crypto_primitives/src/hasher/blake2.rs similarity index 100% rename from core/lib/crypto/src/hasher/blake2.rs rename to core/lib/crypto_primitives/src/hasher/blake2.rs diff --git a/core/lib/crypto/src/hasher/keccak.rs b/core/lib/crypto_primitives/src/hasher/keccak.rs similarity index 100% rename from core/lib/crypto/src/hasher/keccak.rs rename to core/lib/crypto_primitives/src/hasher/keccak.rs diff --git a/core/lib/crypto/src/hasher/mod.rs b/core/lib/crypto_primitives/src/hasher/mod.rs similarity index 100% rename from core/lib/crypto/src/hasher/mod.rs rename to core/lib/crypto_primitives/src/hasher/mod.rs diff --git a/core/lib/crypto/src/hasher/sha256.rs b/core/lib/crypto_primitives/src/hasher/sha256.rs similarity index 100% rename from core/lib/crypto/src/hasher/sha256.rs rename to core/lib/crypto_primitives/src/hasher/sha256.rs diff --git a/core/lib/crypto_primitives/src/lib.rs b/core/lib/crypto_primitives/src/lib.rs index db669b98c1b1..154706d40791 100644 --- a/core/lib/crypto_primitives/src/lib.rs +++ b/core/lib/crypto_primitives/src/lib.rs @@ -2,4 +2,5 @@ pub use self::{ecdsa_signature::K256PrivateKey, eip712_signature::*, packed_eth_ pub(crate) mod ecdsa_signature; pub mod eip712_signature; +pub mod hasher; pub mod packed_eth_signature; diff --git a/prover/config/Cargo.toml b/core/lib/da_client/Cargo.toml similarity index 58% rename from prover/config/Cargo.toml rename to core/lib/da_client/Cargo.toml index ef5612d81e8a..589a077d4bf9 100644 --- a/prover/config/Cargo.toml +++ b/core/lib/da_client/Cargo.toml @@ -1,5 +1,6 @@ [package] -name = "zksync_prover_config" +name = "zksync_da_client" +description = "ZKsync DA client definition" version.workspace = true edition.workspace = true authors.workspace = true @@ -10,8 +11,10 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_config.workspace = true -zksync_env_config.workspace = true -zksync_core_leftovers.workspace = true -zksync_protobuf_config.workspace = true +serde = { workspace = true, features = ["derive"] } 
+tracing.workspace = true
+async-trait.workspace = true
 anyhow.workspace = true
+
+zksync_config.workspace = true
+zksync_types.workspace = true
diff --git a/core/lib/da_client/README.md b/core/lib/da_client/README.md
new file mode 100644
index 000000000000..9c890498467d
--- /dev/null
+++ b/core/lib/da_client/README.md
@@ -0,0 +1,16 @@
+# Data Availability Client
+
+This crate contains the trait that every DA client has to implement.
+
+## Overview
+
+Every implementation is assumed to follow these rules:
+
+- The DA client only serves as a connector between the ZK chain's sequencer and the DA layer.
+- The DA client is not supposed to be a standalone application, but rather a library that is used by the
+  `da_dispatcher`.
+- The retry logic is implemented in the `da_dispatcher`, not in the DA clients.
+- `dispatch_blob` is supposed to be idempotent and to work correctly even if called multiple times with the same
+  params.
+- `get_inclusion_data` has to return the data only after the state roots are relayed to the L1 verification contract
+  (if the DA solution has one).
diff --git a/core/lib/da_client/src/lib.rs b/core/lib/da_client/src/lib.rs
new file mode 100644
index 000000000000..7e4a2643a259
--- /dev/null
+++ b/core/lib/da_client/src/lib.rs
@@ -0,0 +1,32 @@
+pub mod types;
+
+use std::fmt;
+
+use async_trait::async_trait;
+use types::{DAError, DispatchResponse, InclusionData};
+
+/// Trait that defines the interface for the data availability layer clients.
+#[async_trait]
+pub trait DataAvailabilityClient: Sync + Send + fmt::Debug {
+    /// Dispatches a blob to the data availability layer.
+    async fn dispatch_blob(
+        &self,
+        batch_number: u32,
+        data: Vec<u8>,
+    ) -> Result<DispatchResponse, DAError>;
+
+    /// Fetches the inclusion data for a given blob_id.
+    async fn get_inclusion_data(&self, blob_id: &str) -> Result<Option<InclusionData>, DAError>;
+
+    /// Clones the client and wraps it in a Box.
+    fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient>;
+
+    /// Returns the maximum size of the blob (in bytes) that can be dispatched. None means no limit.
+    fn blob_size_limit(&self) -> Option<usize>;
+}
+
+impl Clone for Box<dyn DataAvailabilityClient> {
+    fn clone(&self) -> Box<dyn DataAvailabilityClient> {
+        self.clone_boxed()
+    }
+}
diff --git a/core/lib/da_client/src/types.rs b/core/lib/da_client/src/types.rs
new file mode 100644
index 000000000000..e339111bb51a
--- /dev/null
+++ b/core/lib/da_client/src/types.rs
@@ -0,0 +1,44 @@
+use std::{error, fmt::Display};
+
+use serde::Serialize;
+
+/// `DAError` is the error type returned by the DA clients.
+#[derive(Debug)]
+pub struct DAError {
+    pub error: anyhow::Error,
+    pub is_transient: bool,
+}
+
+impl DAError {
+    pub fn is_transient(&self) -> bool {
+        self.is_transient
+    }
+}
+
+impl Display for DAError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let kind = if self.is_transient {
+            "transient"
+        } else {
+            "fatal"
+        };
+        write!(f, "{kind} data availability client error: {}", self.error)
+    }
+}
+
+impl error::Error for DAError {}
+
+/// `DispatchResponse` is the response received from the DA layer after dispatching a blob.
+#[derive(Default)]
+pub struct DispatchResponse {
+    /// The blob_id is needed to fetch the inclusion data.
+    pub blob_id: String,
+}
+
+/// `InclusionData` is the data needed to verify on L1 that a blob is included in the DA layer.
+#[derive(Default, Serialize)]
+pub struct InclusionData {
+    /// The inclusion data serialized by the DA client. Serialization is done in a way that allows
+    /// the deserialization of the data in Solidity contracts.
+ pub data: Vec, +} diff --git a/core/lib/dal/.sqlx/.query-05be1a2c5cefcb1a58af2e5113e89003638d26219c13bc176e8cfee696d4e9f6.json.nPBbNl b/core/lib/dal/.sqlx/.query-05be1a2c5cefcb1a58af2e5113e89003638d26219c13bc176e8cfee696d4e9f6.json.nPBbNl deleted file mode 100644 index 69a1077452dd..000000000000 --- a/core/lib/dal/.sqlx/.query-05be1a2c5cefcb1a58af2e5113e89003638d26219c13bc176e8cfee696d4e9f6.json.nPBbNl +++ /dev/null @@ -1,119 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n miniblocks.number AS block_number,\n transactions.nonce AS nonce,\n transactions.signature AS signature,\n transactions.initiator_address AS initiator_address,\n transactions.tx_format AS tx_format,\n transactions.value AS value,\n transactions.gas_limit AS gas_limit,\n transactions.max_fee_per_gas AS max_fee_per_gas,\n transactions.max_priority_fee_per_gas AS max_priority_fee_per_gas,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.l1_batch_number AS l1_batch_number,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.data->'contractAddress' AS \"execute_contract_address\",\n transactions.data->'calldata' AS \"calldata\",\n miniblocks.hash AS \"block_hash\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n WHERE\n miniblocks.number = $1 AND transactions.index_in_block = $2 AND transactions.data != '{}'::jsonb", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "tx_hash", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "index_in_block", - "type_info": "Int4" - }, - { - "ordinal": 2, - "name": "block_number", - "type_info": "Int8" - }, - { - "ordinal": 3, - "name": "nonce", - "type_info": "Int8" - }, - { - "ordinal": 4, - "name": "signature", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "initiator_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, - "name": "tx_format", - "type_info": "Int4" - }, - { - "ordinal": 7, - "name": "value", - "type_info": "Numeric" - }, - { - "ordinal": 8, - "name": "gas_limit", - "type_info": "Numeric" - }, - { - "ordinal": 9, - "name": "max_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 10, - "name": "max_priority_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 11, - "name": "effective_gas_price", - "type_info": "Numeric" - }, - { - "ordinal": 12, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 13, - "name": "l1_batch_tx_index", - "type_info": "Int4" - }, - { - "ordinal": 14, - "name": "execute_contract_address", - "type_info": "Jsonb" - }, - { - "ordinal": 15, - "name": "calldata", - "type_info": "Jsonb" - }, - { - "ordinal": 16, - "name": "block_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int4" - ] - }, - "nullable": [ - false, - true, - false, - true, - true, - false, - true, - false, - true, - true, - true, - true, - true, - true, - null, - null, - false - ] - }, - "hash": "05be1a2c5cefcb1a58af2e5113e89003638d26219c13bc176e8cfee696d4e9f6" -} diff --git a/core/lib/dal/.sqlx/query-00c0389f4cde049078885cdf05bdb7dbe0bb23c4fc87a78be2d01b77da2ecbd3.json b/core/lib/dal/.sqlx/query-00c0389f4cde049078885cdf05bdb7dbe0bb23c4fc87a78be2d01b77da2ecbd3.json new file mode 100644 index 000000000000..d83713192cb4 --- /dev/null +++ b/core/lib/dal/.sqlx/query-00c0389f4cde049078885cdf05bdb7dbe0bb23c4fc87a78be2d01b77da2ecbd3.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n 
WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n )\n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "00c0389f4cde049078885cdf05bdb7dbe0bb23c4fc87a78be2d01b77da2ecbd3" +} diff --git a/core/lib/dal/.sqlx/query-0159271d31701963d0f2951c8ca21b20d55684a39d32005baebaba8e98045ab7.json b/core/lib/dal/.sqlx/query-0159271d31701963d0f2951c8ca21b20d55684a39d32005baebaba8e98045ab7.json deleted file mode 100644 index 694ac4183cf1..000000000000 --- a/core/lib/dal/.sqlx/query-0159271d31701963d0f2951c8ca21b20d55684a39d32005baebaba8e98045ab7.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id = $1\n OR eth_prove_tx_id = $1\n OR eth_execute_tx_id = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 2, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 4, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 5, - "name": "l2_to_l1_messages", - "type_info": "ByteaArray" - }, - { - "ordinal": 6, - "name": "bloom", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "priority_ops_onchain_data", - "type_info": "ByteaArray" - }, - { - "ordinal": 8, - "name": "used_contract_hashes", - "type_info": "Jsonb" - }, - { - "ordinal": 9, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 10, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 11, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 12, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 13, - "name": "pubdata_input", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int4" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - false, - true - ] - }, - "hash": "0159271d31701963d0f2951c8ca21b20d55684a39d32005baebaba8e98045ab7" -} diff --git a/core/lib/dal/.sqlx/query-01cc22f19b61145b0dbed96ec84ae1cee8eeca1c74529adee78157dcf8930c44.json b/core/lib/dal/.sqlx/query-01cc22f19b61145b0dbed96ec84ae1cee8eeca1c74529adee78157dcf8930c44.json new file mode 100644 index 000000000000..d8af3cae95b9 --- /dev/null +++ b/core/lib/dal/.sqlx/query-01cc22f19b61145b0dbed96ec84ae1cee8eeca1c74529adee78157dcf8930c44.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT COALESCE(\n (\n SELECT MAX(number) FROM miniblocks\n WHERE l1_batch_number = (\n SELECT number FROM l1_batches\n JOIN eth_txs ON\n l1_batches.eth_commit_tx_id = eth_txs.id\n WHERE\n 
eth_txs.confirmed_eth_tx_history_id IS NOT NULL\n ORDER BY number DESC LIMIT 1\n )\n ),\n 0\n ) AS number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "01cc22f19b61145b0dbed96ec84ae1cee8eeca1c74529adee78157dcf8930c44" +} diff --git a/core/lib/dal/.sqlx/query-0385576f1fb3836fc04a6cde3e92c03e1de8292eb0ea1e026ba1b32a3745c261.json b/core/lib/dal/.sqlx/query-0385576f1fb3836fc04a6cde3e92c03e1de8292eb0ea1e026ba1b32a3745c261.json new file mode 100644 index 000000000000..a98cbb18034a --- /dev/null +++ b/core/lib/dal/.sqlx/query-0385576f1fb3836fc04a6cde3e92c03e1de8292eb0ea1e026ba1b32a3745c261.json @@ -0,0 +1,49 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n storage_logs.hashed_key AS \"hashed_key!\",\n storage_logs.value AS \"value!\",\n storage_logs.miniblock_number AS \"miniblock_number!\",\n initial_writes.l1_batch_number AS \"l1_batch_number!\",\n initial_writes.index\n FROM\n (\n SELECT\n hashed_key,\n MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op\n FROM\n storage_logs\n WHERE\n miniblock_number <= $1\n AND hashed_key >= $3\n AND hashed_key <= $4\n GROUP BY\n hashed_key\n ORDER BY\n hashed_key\n ) AS keys\n INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key\n AND storage_logs.miniblock_number = keys.op[1]\n AND storage_logs.operation_number = keys.op[2]\n INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key\n WHERE\n initial_writes.l1_batch_number <= $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hashed_key!", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "value!", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "miniblock_number!", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "l1_batch_number!", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "index", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Bytea" + ] + }, + "nullable": [ + false, + false, + false, + false, + false + ] + }, + "hash": "0385576f1fb3836fc04a6cde3e92c03e1de8292eb0ea1e026ba1b32a3745c261" +} diff --git a/core/lib/dal/.sqlx/query-35e31e789379f3e36b36b5f824955eb58a0cc4e868ac01a12fae52f7be6b739d.json b/core/lib/dal/.sqlx/query-05b0050aa9d2944542abbcef31af3fe8d35800340d1c6e9d02c15226b699c93b.json similarity index 75% rename from core/lib/dal/.sqlx/query-35e31e789379f3e36b36b5f824955eb58a0cc4e868ac01a12fae52f7be6b739d.json rename to core/lib/dal/.sqlx/query-05b0050aa9d2944542abbcef31af3fe8d35800340d1c6e9d02c15226b699c93b.json index 178eba274fdc..b577e7535eb0 100644 --- a/core/lib/dal/.sqlx/query-35e31e789379f3e36b36b5f824955eb58a0cc4e868ac01a12fae52f7be6b739d.json +++ b/core/lib/dal/.sqlx/query-05b0050aa9d2944542abbcef31af3fe8d35800340d1c6e9d02c15226b699c93b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN 
commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -45,74 +45,74 @@ }, { "ordinal": 8, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 10, + "ordinal": 9, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 11, + "ordinal": 10, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 11, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 13, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 14, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 16, + "ordinal": 15, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 19, + "ordinal": 18, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 20, + "ordinal": 19, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "protocol_version", "type_info": "Int4" }, + { + "ordinal": 21, + "name": "compressed_state_diffs", + "type_info": "Bytea" + }, { "ordinal": 22, "name": "system_logs", @@ -120,21 +120,16 @@ }, { "ordinal": 23, - "name": "compressed_state_diffs", - "type_info": "Bytea" - }, - { - "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 25, "name": "pubdata_input", "type_info": "Bytea" } @@ -155,7 +150,6 @@ true, false, false, - false, true, true, true, @@ -167,12 +161,12 @@ true, true, true, - false, true, + false, true, true, true ] }, - "hash": "35e31e789379f3e36b36b5f824955eb58a0cc4e868ac01a12fae52f7be6b739d" + "hash": "05b0050aa9d2944542abbcef31af3fe8d35800340d1c6e9d02c15226b699c93b" } diff --git a/core/lib/dal/.sqlx/query-05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63.json b/core/lib/dal/.sqlx/query-05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63.json new file mode 100644 index 000000000000..f3c85b9b43dc --- /dev/null +++ b/core/lib/dal/.sqlx/query-05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n 
l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n WHERE\n (\n vm_run_data_blob_url IS NOT NULL\n AND proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n AND status = 'unpicked'\n )\n OR (\n status = 'picked_by_prover'\n AND prover_taken_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n RETURNING\n proof_generation_details.l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Interval" + ] + }, + "nullable": [ + false + ] + }, + "hash": "05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63" +} diff --git a/core/lib/dal/.sqlx/query-08e59ed8e2fd1a74e19d8bf0d131e4ee6682a89fb86f3b715a240805d44e6d87.json b/core/lib/dal/.sqlx/query-08e59ed8e2fd1a74e19d8bf0d131e4ee6682a89fb86f3b715a240805d44e6d87.json deleted file mode 100644 index 0c3ca92c10c5..000000000000 --- a/core/lib/dal/.sqlx/query-08e59ed8e2fd1a74e19d8bf0d131e4ee6682a89fb86f3b715a240805d44e6d87.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n proof_generation_details (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at)\n VALUES\n ($1, 'ready_to_be_proven', $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [] - }, - "hash": "08e59ed8e2fd1a74e19d8bf0d131e4ee6682a89fb86f3b715a240805d44e6d87" -} diff --git a/core/lib/dal/.sqlx/query-0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55.json b/core/lib/dal/.sqlx/query-0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55.json new file mode 100644 index 000000000000..822a6967f6db --- /dev/null +++ b/core/lib/dal/.sqlx/query-0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n data_availability (l1_batch_number, blob_id, sent_at, created_at, updated_at)\n VALUES\n ($1, $2, $3, NOW(), NOW())\n ON CONFLICT DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Timestamp" + ] + }, + "nullable": [] + }, + "hash": "0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55" +} diff --git a/core/lib/dal/.sqlx/query-0ccfbde0df7c74b489bae4799177b9a22283340a8c9fb4c28d2d76de921ca77b.json b/core/lib/dal/.sqlx/query-0ccfbde0df7c74b489bae4799177b9a22283340a8c9fb4c28d2d76de921ca77b.json new file mode 100644 index 000000000000..f4bd9fdfb765 --- /dev/null +++ b/core/lib/dal/.sqlx/query-0ccfbde0df7c74b489bae4799177b9a22283340a8c9fb4c28d2d76de921ca77b.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number,\n blob_id,\n inclusion_data,\n sent_at\n FROM\n data_availability\n WHERE\n inclusion_data IS NULL\n ORDER BY\n l1_batch_number\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "blob_id", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "inclusion_data", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "sent_at", + "type_info": "Timestamp" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + true, + false + ] + }, + "hash": 
"0ccfbde0df7c74b489bae4799177b9a22283340a8c9fb4c28d2d76de921ca77b" +} diff --git a/core/lib/dal/.sqlx/query-0fc8ede1d0962938d606c6352335afce09869d43eb88ec7fdb526ce8491e35d9.json b/core/lib/dal/.sqlx/query-0fc8ede1d0962938d606c6352335afce09869d43eb88ec7fdb526ce8491e35d9.json new file mode 100644 index 000000000000..1ad92abac368 --- /dev/null +++ b/core/lib/dal/.sqlx/query-0fc8ede1d0962938d606c6352335afce09869d43eb88ec7fdb526ce8491e35d9.json @@ -0,0 +1,56 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n *\n FROM\n base_token_ratios\n ORDER BY\n ratio_timestamp DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "created_at", + "type_info": "Timestamp" + }, + { + "ordinal": 2, + "name": "updated_at", + "type_info": "Timestamp" + }, + { + "ordinal": 3, + "name": "ratio_timestamp", + "type_info": "Timestamp" + }, + { + "ordinal": 4, + "name": "numerator", + "type_info": "Numeric" + }, + { + "ordinal": 5, + "name": "denominator", + "type_info": "Numeric" + }, + { + "ordinal": 6, + "name": "used_in_l1", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "0fc8ede1d0962938d606c6352335afce09869d43eb88ec7fdb526ce8491e35d9" +} diff --git a/core/lib/dal/.sqlx/query-11af69fc254e54449b64c086667700a95e4c37a7a18531b3cdf120394cb055b9.json b/core/lib/dal/.sqlx/query-11af69fc254e54449b64c086667700a95e4c37a7a18531b3cdf120394cb055b9.json deleted file mode 100644 index ed211d7dc9d8..000000000000 --- a/core/lib/dal/.sqlx/query-11af69fc254e54449b64c086667700a95e4c37a7a18531b3cdf120394cb055b9.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n WHERE\n status = 'ready_to_be_proven'\n OR (\n status = 'picked_by_prover'\n AND prover_taken_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n proof_generation_details.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Interval" - ] - }, - "nullable": [ - false - ] - }, - "hash": "11af69fc254e54449b64c086667700a95e4c37a7a18531b3cdf120394cb055b9" -} diff --git a/core/lib/dal/.sqlx/query-1bbfac481c402bcb3bb888b84146d922fa1fc9c202072fbc04cae1bbf97195aa.json b/core/lib/dal/.sqlx/query-1bbfac481c402bcb3bb888b84146d922fa1fc9c202072fbc04cae1bbf97195aa.json new file mode 100644 index 000000000000..f24a28ffdc28 --- /dev/null +++ b/core/lib/dal/.sqlx/query-1bbfac481c402bcb3bb888b84146d922fa1fc9c202072fbc04cae1bbf97195aa.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n vm_runner_bwip (l1_batch_number, created_at, updated_at, processing_started_at)\n VALUES\n ($1, NOW(), NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n updated_at = NOW(),\n processing_started_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "1bbfac481c402bcb3bb888b84146d922fa1fc9c202072fbc04cae1bbf97195aa" +} diff --git a/core/lib/dal/.sqlx/query-1df2ddeea407a09acdabb35d3e0bfd5b1d36459ae4b720fd3ec9047e89f645ec.json 
b/core/lib/dal/.sqlx/query-1df2ddeea407a09acdabb35d3e0bfd5b1d36459ae4b720fd3ec9047e89f645ec.json new file mode 100644 index 000000000000..316400f97401 --- /dev/null +++ b/core/lib/dal/.sqlx/query-1df2ddeea407a09acdabb35d3e0bfd5b1d36459ae4b720fd3ec9047e89f645ec.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MAX(l1_batch_number) AS \"last_processed_l1_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_processed_l1_batch", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "1df2ddeea407a09acdabb35d3e0bfd5b1d36459ae4b720fd3ec9047e89f645ec" +} diff --git a/core/lib/dal/.sqlx/query-b259e6bacd98fa68003e0c87bb28cc77bd2dcee4a04d1afc9779714854623a79.json b/core/lib/dal/.sqlx/query-1e3c88b41bc02cb6a116fa930ae85b8b60165ed757ae1044e83fdc877d95cbd8.json similarity index 71% rename from core/lib/dal/.sqlx/query-b259e6bacd98fa68003e0c87bb28cc77bd2dcee4a04d1afc9779714854623a79.json rename to core/lib/dal/.sqlx/query-1e3c88b41bc02cb6a116fa930ae85b8b60165ed757ae1044e83fdc877d95cbd8.json index 90c940c3977c..206d2f91e3b1 100644 --- a/core/lib/dal/.sqlx/query-b259e6bacd98fa68003e0c87bb28cc77bd2dcee4a04d1afc9779714854623a79.json +++ b/core/lib/dal/.sqlx/query-1e3c88b41bc02cb6a116fa930ae85b8b60165ed757ae1044e83fdc877d95cbd8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblock_number,\n log_index_in_miniblock,\n log_index_in_tx,\n tx_hash,\n NULL::bytea AS \"block_hash\",\n NULL::BIGINT AS \"l1_batch_number?\",\n shard_id,\n is_service,\n tx_index_in_miniblock,\n tx_index_in_l1_batch,\n sender,\n key,\n value\n FROM\n l2_to_l1_logs\n WHERE\n tx_hash = ANY ($1)\n ORDER BY\n tx_index_in_l1_batch ASC,\n log_index_in_tx ASC\n ", + "query": "\n SELECT\n miniblock_number,\n log_index_in_miniblock,\n log_index_in_tx,\n tx_hash,\n NULL::BIGINT AS \"l1_batch_number?\",\n shard_id,\n is_service,\n tx_index_in_miniblock,\n tx_index_in_l1_batch,\n sender,\n key,\n value\n FROM\n l2_to_l1_logs\n WHERE\n tx_hash = ANY ($1)\n ORDER BY\n tx_index_in_l1_batch ASC,\n log_index_in_tx ASC\n ", "describe": { "columns": [ { @@ -25,46 +25,41 @@ }, { "ordinal": 4, - "name": "block_hash", - "type_info": "Bytea" - }, - { - "ordinal": 5, "name": "l1_batch_number?", "type_info": "Int8" }, { - "ordinal": 6, + "ordinal": 5, "name": "shard_id", "type_info": "Int4" }, { - "ordinal": 7, + "ordinal": 6, "name": "is_service", "type_info": "Bool" }, { - "ordinal": 8, + "ordinal": 7, "name": "tx_index_in_miniblock", "type_info": "Int4" }, { - "ordinal": 9, + "ordinal": 8, "name": "tx_index_in_l1_batch", "type_info": "Int4" }, { - "ordinal": 10, + "ordinal": 9, "name": "sender", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 10, "name": "key", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 11, "name": "value", "type_info": "Bytea" } @@ -80,7 +75,6 @@ false, false, null, - null, false, false, false, @@ -90,5 +84,5 @@ false ] }, - "hash": "b259e6bacd98fa68003e0c87bb28cc77bd2dcee4a04d1afc9779714854623a79" + "hash": "1e3c88b41bc02cb6a116fa930ae85b8b60165ed757ae1044e83fdc877d95cbd8" } diff --git a/core/lib/dal/.sqlx/query-21cfb584e3731852e96da1968503208a30b0eead764527ff957ea6e86a34eec6.json b/core/lib/dal/.sqlx/query-21cfb584e3731852e96da1968503208a30b0eead764527ff957ea6e86a34eec6.json index 6d78d4ebd2f0..541af15fa271 100644 --- a/core/lib/dal/.sqlx/query-21cfb584e3731852e96da1968503208a30b0eead764527ff957ea6e86a34eec6.json +++ 
b/core/lib/dal/.sqlx/query-21cfb584e3731852e96da1968503208a30b0eead764527ff957ea6e86a34eec6.json @@ -39,8 +39,8 @@ }, "nullable": [ false, - false, - false, + true, + true, false, false, false diff --git a/core/lib/dal/.sqlx/query-b65bb931f00b53cc9ef41b58c1532945b091fece95bb85f954230c26ba78540a.json b/core/lib/dal/.sqlx/query-2486f8404e8cfcb9c178acd6dccae32e8812becbe5ce85e63694385f015f2cfe.json similarity index 72% rename from core/lib/dal/.sqlx/query-b65bb931f00b53cc9ef41b58c1532945b091fece95bb85f954230c26ba78540a.json rename to core/lib/dal/.sqlx/query-2486f8404e8cfcb9c178acd6dccae32e8812becbe5ce85e63694385f015f2cfe.json index ef1d2075170a..f28e3d044ccc 100644 --- a/core/lib/dal/.sqlx/query-b65bb931f00b53cc9ef41b58c1532945b091fece95bb85f954230c26ba78540a.json +++ b/core/lib/dal/.sqlx/query-2486f8404e8cfcb9c178acd6dccae32e8812becbe5ce85e63694385f015f2cfe.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -45,96 +45,91 @@ }, { "ordinal": 8, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 10, + "ordinal": 9, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 11, + "ordinal": 10, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 11, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 13, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 14, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 16, + "ordinal": 15, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 19, + "ordinal": 18, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 
20, + "ordinal": 19, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 22, + "ordinal": 21, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 24, + "ordinal": 23, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 25, "name": "pubdata_input", "type_info": "Bytea" } @@ -157,7 +152,6 @@ true, false, false, - false, true, true, true, @@ -176,5 +170,5 @@ true ] }, - "hash": "b65bb931f00b53cc9ef41b58c1532945b091fece95bb85f954230c26ba78540a" + "hash": "2486f8404e8cfcb9c178acd6dccae32e8812becbe5ce85e63694385f015f2cfe" } diff --git a/core/lib/dal/.sqlx/query-37fee554801733f26904e23f6f84b79b6afddd869783dda827e2281640529492.json b/core/lib/dal/.sqlx/query-38dea171e4c49f54bf1db5ac9bfb3be9cf3928755be5f5fcfcdc086e73fb15e2.json similarity index 69% rename from core/lib/dal/.sqlx/query-37fee554801733f26904e23f6f84b79b6afddd869783dda827e2281640529492.json rename to core/lib/dal/.sqlx/query-38dea171e4c49f54bf1db5ac9bfb3be9cf3928755be5f5fcfcdc086e73fb15e2.json index b3f0bb2d8ab6..7ac6785d8e64 100644 --- a/core/lib/dal/.sqlx/query-37fee554801733f26904e23f6f84b79b6afddd869783dda827e2281640529492.json +++ b/core/lib/dal/.sqlx/query-38dea171e4c49f54bf1db5ac9bfb3be9cf3928755be5f5fcfcdc086e73fb15e2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -45,96 +45,91 @@ }, { "ordinal": 8, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 10, + "ordinal": 9, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 11, + "ordinal": 10, "name": "compressed_initial_writes", 
"type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 11, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 13, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 14, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 16, + "ordinal": 15, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 19, + "ordinal": 18, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 20, + "ordinal": 19, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 22, + "ordinal": 21, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 24, + "ordinal": 23, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 25, "name": "pubdata_input", "type_info": "Bytea" } @@ -155,7 +150,6 @@ true, false, false, - false, true, true, true, @@ -174,5 +168,5 @@ true ] }, - "hash": "37fee554801733f26904e23f6f84b79b6afddd869783dda827e2281640529492" + "hash": "38dea171e4c49f54bf1db5ac9bfb3be9cf3928755be5f5fcfcdc086e73fb15e2" } diff --git a/core/lib/dal/.sqlx/query-3e3ddd6578e37d38cc03fa1df0b7617de769aac94aeabc55d58b906ca3698bc8.json b/core/lib/dal/.sqlx/query-3e3ddd6578e37d38cc03fa1df0b7617de769aac94aeabc55d58b906ca3698bc8.json deleted file mode 100644 index fb1478c1a627..000000000000 --- a/core/lib/dal/.sqlx/query-3e3ddd6578e37d38cc03fa1df0b7617de769aac94aeabc55d58b906ca3698bc8.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n $21,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4", - "Int8", - "ByteaArray", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Bytea", - "Bytea", - "Int4", - "ByteaArray", - "Int8Array", - "Int8Array", - "Bytea", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "3e3ddd6578e37d38cc03fa1df0b7617de769aac94aeabc55d58b906ca3698bc8" -} diff --git a/core/lib/dal/.sqlx/query-3ecd408294c93a5ee7dbbe128c52c62033a7f690353f01b2978ef9b30d52c94e.json b/core/lib/dal/.sqlx/query-3ecd408294c93a5ee7dbbe128c52c62033a7f690353f01b2978ef9b30d52c94e.json new file mode 100644 index 000000000000..a64b8e06628f --- /dev/null +++ 
b/core/lib/dal/.sqlx/query-3ecd408294c93a5ee7dbbe128c52c62033a7f690353f01b2978ef9b30d52c94e.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n inclusion_data\n FROM\n data_availability\n WHERE\n l1_batch_number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "inclusion_data", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "3ecd408294c93a5ee7dbbe128c52c62033a7f690353f01b2978ef9b30d52c94e" +} diff --git a/core/lib/dal/.sqlx/query-3f0966f082e9e7cdfa18c107a1283b7955a058705093d7372726c3fc7ce506ad.json b/core/lib/dal/.sqlx/query-3f0966f082e9e7cdfa18c107a1283b7955a058705093d7372726c3fc7ce506ad.json new file mode 100644 index 000000000000..7b95614bfdff --- /dev/null +++ b/core/lib/dal/.sqlx/query-3f0966f082e9e7cdfa18c107a1283b7955a058705093d7372726c3fc7ce506ad.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE vm_runner_protective_reads\n SET\n time_taken = NOW() - processing_started_at\n WHERE\n l1_batch_number = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "3f0966f082e9e7cdfa18c107a1283b7955a058705093d7372726c3fc7ce506ad" +} diff --git a/core/lib/dal/.sqlx/query-64972039a9d9335332a0763eb1547489b5c6a3e2ff36d3b836ac24e1db9fd7d7.json b/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json similarity index 72% rename from core/lib/dal/.sqlx/query-64972039a9d9335332a0763eb1547489b5c6a3e2ff36d3b836ac24e1db9fd7d7.json rename to core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json index c164bcab2c39..4a73fde57e29 100644 --- a/core/lib/dal/.sqlx/query-64972039a9d9335332a0763eb1547489b5c6a3e2ff36d3b836ac24e1db9fd7d7.json +++ b/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -25,51 +25,46 @@ }, { "ordinal": 4, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 5, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 6, + "ordinal": 5, "name": "bloom", "type_info": "Bytea" }, { - "ordinal": 7, + "ordinal": 6, "name": "priority_ops_onchain_data", "type_info": "ByteaArray" }, { - "ordinal": 8, + "ordinal": 7, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 9, + "ordinal": 8, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 10, + "ordinal": 9, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 10, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 11, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 13, + "ordinal": 12, "name": "pubdata_input", "type_info": "Bytea" } @@ -88,7 +83,6 @@ false, false, false, 
- false, true, true, true, @@ -96,5 +90,5 @@ true ] }, - "hash": "64972039a9d9335332a0763eb1547489b5c6a3e2ff36d3b836ac24e1db9fd7d7" + "hash": "454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd" } diff --git a/core/lib/dal/.sqlx/query-5659480e5d79dab3399e35539b240e7eb9f598999c28015a504605f88bf84b33.json b/core/lib/dal/.sqlx/query-4570e9ffd0b2973d0bc2986c391d0a59076dda4aa572ade2492f37e537fdf6ed.json similarity index 71% rename from core/lib/dal/.sqlx/query-5659480e5d79dab3399e35539b240e7eb9f598999c28015a504605f88bf84b33.json rename to core/lib/dal/.sqlx/query-4570e9ffd0b2973d0bc2986c391d0a59076dda4aa572ade2492f37e537fdf6ed.json index 5948d75785b2..7297bcdcad23 100644 --- a/core/lib/dal/.sqlx/query-5659480e5d79dab3399e35539b240e7eb9f598999c28015a504605f88bf84b33.json +++ b/core/lib/dal/.sqlx/query-4570e9ffd0b2973d0bc2986c391d0a59076dda4aa572ade2492f37e537fdf6ed.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n id > (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n )\n ORDER BY\n id\n LIMIT\n $1\n ", + "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n from_addr IS NOT DISTINCT FROM $2 -- can't just use equality as NULL != NULL\n AND id > (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NOT NULL\n AND eth_txs.from_addr IS NOT DISTINCT FROM $2\n )\n ORDER BY\n id\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -76,7 +76,8 @@ ], "parameters": { "Left": [ - "Int8" + "Int8", + "Bytea" ] }, "nullable": [ @@ -96,5 +97,5 @@ true ] }, - "hash": "5659480e5d79dab3399e35539b240e7eb9f598999c28015a504605f88bf84b33" + "hash": "4570e9ffd0b2973d0bc2986c391d0a59076dda4aa572ade2492f37e537fdf6ed" } diff --git a/core/lib/dal/.sqlx/query-5137159db7d3ff456e368e6246b07554ce738a2d7005472e7e76a64a8fbd57ad.json b/core/lib/dal/.sqlx/query-5137159db7d3ff456e368e6246b07554ce738a2d7005472e7e76a64a8fbd57ad.json new file mode 100644 index 000000000000..07ef0aba074d --- /dev/null +++ b/core/lib/dal/.sqlx/query-5137159db7d3ff456e368e6246b07554ce738a2d7005472e7e76a64a8fbd57ad.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n proof_generation_details (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, 'unpicked', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "5137159db7d3ff456e368e6246b07554ce738a2d7005472e7e76a64a8fbd57ad" +} diff --git a/core/lib/dal/.sqlx/query-3ba9bc85e3e286aadef8aad27eb38fc90b18155e3435f58d9888fa50d92042f7.json b/core/lib/dal/.sqlx/query-526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3.json similarity index 80% rename from core/lib/dal/.sqlx/query-3ba9bc85e3e286aadef8aad27eb38fc90b18155e3435f58d9888fa50d92042f7.json rename to core/lib/dal/.sqlx/query-526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3.json index 221e04e0c717..dbdec4ac5d65 100644 --- a/core/lib/dal/.sqlx/query-3ba9bc85e3e286aadef8aad27eb38fc90b18155e3435f58d9888fa50d92042f7.json +++ b/core/lib/dal/.sqlx/query-526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n NULL::bytea AS \"block_hash\",\n NULL::BIGINT AS \"l1_batch_number?\",\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n 
event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n tx_hash = ANY ($1)\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", + "query": "\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n NULL::bytea AS \"block_hash\",\n NULL::BIGINT AS \"l1_batch_number?\",\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx,\n NULL::BIGINT AS \"block_timestamp?\"\n FROM\n events\n WHERE\n tx_hash = ANY ($1)\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", "describe": { "columns": [ { @@ -67,6 +67,11 @@ "ordinal": 12, "name": "event_index_in_tx", "type_info": "Int4" + }, + { + "ordinal": 13, + "name": "block_timestamp?", + "type_info": "Int8" } ], "parameters": { @@ -87,8 +92,9 @@ false, false, false, - false + false, + null ] }, - "hash": "3ba9bc85e3e286aadef8aad27eb38fc90b18155e3435f58d9888fa50d92042f7" + "hash": "526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3" } diff --git a/core/lib/dal/.sqlx/query-4c6a564888598d203fc2302f5a54ab6b42342e96ac8093f12812ab9a65e1d3c5.json b/core/lib/dal/.sqlx/query-52bb6de515e1edf4dcf34a31600edb31cfd855014dfca5041833b9d5d9f7a55e.json similarity index 73% rename from core/lib/dal/.sqlx/query-4c6a564888598d203fc2302f5a54ab6b42342e96ac8093f12812ab9a65e1d3c5.json rename to core/lib/dal/.sqlx/query-52bb6de515e1edf4dcf34a31600edb31cfd855014dfca5041833b9d5d9f7a55e.json index 2bb2502ba5c9..b872e2ce6297 100644 --- a/core/lib/dal/.sqlx/query-4c6a564888598d203fc2302f5a54ab6b42342e96ac8093f12812ab9a65e1d3c5.json +++ b/core/lib/dal/.sqlx/query-52bb6de515e1edf4dcf34a31600edb31cfd855014dfca5041833b9d5d9f7a55e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -45,74 +45,74 @@ }, { "ordinal": 8, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 10, + "ordinal": 9, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 11, + "ordinal": 10, "name": 
"compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 11, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 13, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 14, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 16, + "ordinal": 15, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 19, + "ordinal": 18, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 20, + "ordinal": 19, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "protocol_version", "type_info": "Int4" }, + { + "ordinal": 21, + "name": "system_logs", + "type_info": "ByteaArray" + }, { "ordinal": 22, "name": "compressed_state_diffs", @@ -120,21 +120,16 @@ }, { "ordinal": 23, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 25, "name": "pubdata_input", "type_info": "Bytea" } @@ -155,8 +150,6 @@ true, false, false, - false, - true, true, true, true, @@ -171,8 +164,9 @@ false, true, true, + true, true ] }, - "hash": "4c6a564888598d203fc2302f5a54ab6b42342e96ac8093f12812ab9a65e1d3c5" + "hash": "52bb6de515e1edf4dcf34a31600edb31cfd855014dfca5041833b9d5d9f7a55e" } diff --git a/core/lib/dal/.sqlx/query-53ab91ac4daebeb7d9d38018f31a9a184779646a16537df5b7cc54d0b4175d24.json b/core/lib/dal/.sqlx/query-53ab91ac4daebeb7d9d38018f31a9a184779646a16537df5b7cc54d0b4175d24.json new file mode 100644 index 000000000000..9694b9c662c1 --- /dev/null +++ b/core/lib/dal/.sqlx/query-53ab91ac4daebeb7d9d38018f31a9a184779646a16537df5b7cc54d0b4175d24.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n transactions.execution_info\n FROM\n transactions\n WHERE\n transactions.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "execution_info", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + false + ] + }, + "hash": "53ab91ac4daebeb7d9d38018f31a9a184779646a16537df5b7cc54d0b4175d24" +} diff --git a/core/lib/dal/.sqlx/query-58f900812efdb615f6286eb4212c3252680594256dccb0d7e51444f613011980.json b/core/lib/dal/.sqlx/query-58f900812efdb615f6286eb4212c3252680594256dccb0d7e51444f613011980.json new file mode 100644 index 000000000000..0b45e2c25c2c --- /dev/null +++ b/core/lib/dal/.sqlx/query-58f900812efdb615f6286eb4212c3252680594256dccb0d7e51444f613011980.json @@ -0,0 +1,88 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n miniblock_number,\n log_index_in_miniblock,\n log_index_in_tx,\n tx_hash,\n l1_batch_number,\n shard_id,\n is_service,\n tx_index_in_miniblock,\n tx_index_in_l1_batch,\n sender,\n key,\n value\n FROM\n l2_to_l1_logs\n JOIN miniblocks ON l2_to_l1_logs.miniblock_number = miniblocks.number\n WHERE\n l1_batch_number = $1\n ORDER BY\n miniblock_number,\n log_index_in_miniblock\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "miniblock_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "log_index_in_miniblock", + 
"type_info": "Int4" + }, + { + "ordinal": 2, + "name": "log_index_in_tx", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "tx_hash", + "type_info": "Bytea" + }, + { + "ordinal": 4, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 5, + "name": "shard_id", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "is_service", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "tx_index_in_miniblock", + "type_info": "Int4" + }, + { + "ordinal": 8, + "name": "tx_index_in_l1_batch", + "type_info": "Int4" + }, + { + "ordinal": 9, + "name": "sender", + "type_info": "Bytea" + }, + { + "ordinal": 10, + "name": "key", + "type_info": "Bytea" + }, + { + "ordinal": 11, + "name": "value", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "58f900812efdb615f6286eb4212c3252680594256dccb0d7e51444f613011980" +} diff --git a/core/lib/dal/.sqlx/query-5c99342c4fbf36ccc8e9c9dafc76de37201091bfccd3caf922e766896c5a542b.json b/core/lib/dal/.sqlx/query-5c99342c4fbf36ccc8e9c9dafc76de37201091bfccd3caf922e766896c5a542b.json new file mode 100644 index 000000000000..5d09a9c37f7a --- /dev/null +++ b/core/lib/dal/.sqlx/query-5c99342c4fbf36ccc8e9c9dafc76de37201091bfccd3caf922e766896c5a542b.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE data_availability\n SET\n inclusion_data = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n AND inclusion_data IS NULL\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bytea", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "5c99342c4fbf36ccc8e9c9dafc76de37201091bfccd3caf922e766896c5a542b" +} diff --git a/core/lib/dal/.sqlx/query-71f82ea7c83cccab209e13ea388dbb6b0c35c756638588792ac9c1db221fef33.json b/core/lib/dal/.sqlx/query-63f95c6cdcfd933e2cf8f62c0d408f2dce89f7b700896fcc0f242e0e15ba058e.json similarity index 64% rename from core/lib/dal/.sqlx/query-71f82ea7c83cccab209e13ea388dbb6b0c35c756638588792ac9c1db221fef33.json rename to core/lib/dal/.sqlx/query-63f95c6cdcfd933e2cf8f62c0d408f2dce89f7b700896fcc0f242e0e15ba058e.json index afa7ac0e2111..cb68e7622524 100644 --- a/core/lib/dal/.sqlx/query-71f82ea7c83cccab209e13ea388dbb6b0c35c756638588792ac9c1db221fef33.json +++ b/core/lib/dal/.sqlx/query-63f95c6cdcfd933e2cf8f62c0d408f2dce89f7b700896fcc0f242e0e15ba058e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR 
protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -45,96 +45,91 @@ }, { "ordinal": 8, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 10, + "ordinal": 9, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 11, + "ordinal": 10, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 11, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 13, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 14, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 16, + "ordinal": 15, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 19, + "ordinal": 18, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 20, + "ordinal": 19, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 22, + "ordinal": 21, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 24, + "ordinal": 23, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 25, "name": "pubdata_input", "type_info": "Bytea" } @@ -144,6 +139,7 @@ "Bytea", "Bytea", "Int4", + "Bool", "Int8" ] }, @@ -158,7 +154,6 @@ true, false, false, - false, true, true, true, @@ -177,5 +172,5 @@ true ] }, - "hash": "71f82ea7c83cccab209e13ea388dbb6b0c35c756638588792ac9c1db221fef33" + 
"hash": "63f95c6cdcfd933e2cf8f62c0d408f2dce89f7b700896fcc0f242e0e15ba058e" } diff --git a/core/lib/dal/.sqlx/query-d10bcceb808ee616c2de5f821246d2769261b070ab93a6e0aa889e619d08cd2c.json b/core/lib/dal/.sqlx/query-659f616d3af4a79f898e84f890e06de9633d1086da972a467d89831e7a07c67e.json similarity index 66% rename from core/lib/dal/.sqlx/query-d10bcceb808ee616c2de5f821246d2769261b070ab93a6e0aa889e619d08cd2c.json rename to core/lib/dal/.sqlx/query-659f616d3af4a79f898e84f890e06de9633d1086da972a467d89831e7a07c67e.json index 7d32cb004013..9116a25c1673 100644 --- a/core/lib/dal/.sqlx/query-d10bcceb808ee616c2de5f821246d2769261b070ab93a6e0aa889e619d08cd2c.json +++ b/core/lib/dal/.sqlx/query-659f616d3af4a79f898e84f890e06de9633d1086da972a467d89831e7a07c67e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", "describe": { "columns": [ { @@ -45,96 +45,91 @@ }, { "ordinal": 8, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 10, + "ordinal": 9, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 11, + "ordinal": 10, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 11, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 13, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 14, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 16, + "ordinal": 15, "name": "bootloader_code_hash", "type_info": 
"Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 19, + "ordinal": 18, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 20, + "ordinal": 19, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 22, + "ordinal": 21, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 24, + "ordinal": 23, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 25, "name": "pubdata_input", "type_info": "Bytea" } @@ -156,7 +151,6 @@ true, false, false, - false, true, true, true, @@ -175,5 +169,5 @@ true ] }, - "hash": "d10bcceb808ee616c2de5f821246d2769261b070ab93a6e0aa889e619d08cd2c" + "hash": "659f616d3af4a79f898e84f890e06de9633d1086da972a467d89831e7a07c67e" } diff --git a/core/lib/dal/.sqlx/query-23be43bf705d679ca751c89353716065fcad42c6b621efb3a135a16b477dcfd9.json b/core/lib/dal/.sqlx/query-6bb5eab89be2b08a08c00b5cd8d725208b0ecfe8065c8f893ff38c49072a21fc.json similarity index 70% rename from core/lib/dal/.sqlx/query-23be43bf705d679ca751c89353716065fcad42c6b621efb3a135a16b477dcfd9.json rename to core/lib/dal/.sqlx/query-6bb5eab89be2b08a08c00b5cd8d725208b0ecfe8065c8f893ff38c49072a21fc.json index c0e8bb9d2553..71318c9a1023 100644 --- a/core/lib/dal/.sqlx/query-23be43bf705d679ca751c89353716065fcad42c6b621efb3a135a16b477dcfd9.json +++ b/core/lib/dal/.sqlx/query-6bb5eab89be2b08a08c00b5cd8d725208b0ecfe8065c8f893ff38c49072a21fc.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n confirmed_eth_tx_history_id IS NULL\n AND id <= (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n WHERE\n sent_at_block IS NOT NULL\n )\n ORDER BY\n id\n ", + "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n from_addr IS NOT DISTINCT FROM $1 -- can't just use equality as NULL != NULL\n AND confirmed_eth_tx_history_id IS NULL\n AND id <= (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NOT NULL\n AND eth_txs.from_addr IS NOT DISTINCT FROM $1\n )\n ORDER BY\n id\n ", "describe": { "columns": [ { @@ -75,7 +75,9 @@ } ], "parameters": { - "Left": [] + "Left": [ + "Bytea" + ] }, "nullable": [ false, @@ -94,5 +96,5 @@ true ] }, - "hash": "23be43bf705d679ca751c89353716065fcad42c6b621efb3a135a16b477dcfd9" + "hash": "6bb5eab89be2b08a08c00b5cd8d725208b0ecfe8065c8f893ff38c49072a21fc" } diff --git a/core/lib/dal/.sqlx/query-6f003ee0311b9ff1f42d3a74587670ab55ca94647e0caa92adab7c18260f18ff.json b/core/lib/dal/.sqlx/query-6f003ee0311b9ff1f42d3a74587670ab55ca94647e0caa92adab7c18260f18ff.json new file mode 100644 index 000000000000..768089b083a1 --- /dev/null +++ b/core/lib/dal/.sqlx/query-6f003ee0311b9ff1f42d3a74587670ab55ca94647e0caa92adab7c18260f18ff.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n blob_id\n FROM\n data_availability\n WHERE\n l1_batch_number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "blob_id", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + 
"nullable": [ + false + ] + }, + "hash": "6f003ee0311b9ff1f42d3a74587670ab55ca94647e0caa92adab7c18260f18ff" +} diff --git a/core/lib/dal/.sqlx/query-703836a3f065b0aedf71ad0474cac5e5fccb3ec55aa1227f5f1ea5a11f9b36a9.json b/core/lib/dal/.sqlx/query-703836a3f065b0aedf71ad0474cac5e5fccb3ec55aa1227f5f1ea5a11f9b36a9.json new file mode 100644 index 000000000000..be9d5219665a --- /dev/null +++ b/core/lib/dal/.sqlx/query-703836a3f065b0aedf71ad0474cac5e5fccb3ec55aa1227f5f1ea5a11f9b36a9.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE proof_generation_details\n SET\n vm_run_data_blob_url = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "703836a3f065b0aedf71ad0474cac5e5fccb3ec55aa1227f5f1ea5a11f9b36a9" +} diff --git a/core/lib/dal/.sqlx/query-58aed39245c72d231b268ce83105bb2036d21f60d4c6934f9145730ac35c04de.json b/core/lib/dal/.sqlx/query-815a7037a11dfc32e9d084d57178a9777126abebaf648c00fdcc24beb9967010.json similarity index 60% rename from core/lib/dal/.sqlx/query-58aed39245c72d231b268ce83105bb2036d21f60d4c6934f9145730ac35c04de.json rename to core/lib/dal/.sqlx/query-815a7037a11dfc32e9d084d57178a9777126abebaf648c00fdcc24beb9967010.json index 502d14e05ea5..a5419ff6706b 100644 --- a/core/lib/dal/.sqlx/query-58aed39245c72d231b268ce83105bb2036d21f60d4c6934f9145730ac35c04de.json +++ b/core/lib/dal/.sqlx/query-815a7037a11dfc32e9d084d57178a9777126abebaf648c00fdcc24beb9967010.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n WHERE\n status = 'ready_to_be_proven'\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n ", + "query": "\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n WHERE\n status = 'unpicked'\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "58aed39245c72d231b268ce83105bb2036d21f60d4c6934f9145730ac35c04de" + "hash": "815a7037a11dfc32e9d084d57178a9777126abebaf648c00fdcc24beb9967010" } diff --git a/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json b/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json new file mode 100644 index 000000000000..5130763af73c --- /dev/null +++ b/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MAX(l1_batch_number) AS \"number\"\n FROM\n l1_batches_consensus\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c" +} diff --git a/core/lib/dal/.sqlx/query-89d58c9735adbd9f40791d61bd63a0a2691a4b3238fce9dbc3a7d2861a4ca967.json b/core/lib/dal/.sqlx/query-89d58c9735adbd9f40791d61bd63a0a2691a4b3238fce9dbc3a7d2861a4ca967.json new file mode 100644 index 000000000000..b65b57e4e01e --- /dev/null +++ b/core/lib/dal/.sqlx/query-89d58c9735adbd9f40791d61bd63a0a2691a4b3238fce9dbc3a7d2861a4ca967.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n address AS \"address!\",\n key AS \"key!\",\n value\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN (\n SELECT\n MIN(number)\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n ) AND (\n SELECT\n MAX(number)\n 
FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n )\n ORDER BY\n miniblock_number,\n operation_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "address!", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "key!", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "value", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true, + true, + false + ] + }, + "hash": "89d58c9735adbd9f40791d61bd63a0a2691a4b3238fce9dbc3a7d2861a4ca967" +} diff --git a/core/lib/dal/.sqlx/query-3b013b93ea4a6766162c9f0c60517a7ffc993cf436ad3aeeae82ed3e330b07bd.json b/core/lib/dal/.sqlx/query-8c763c05187a409a54806b0eb88e733f635b183960226848b280383042ea3637.json similarity index 54% rename from core/lib/dal/.sqlx/query-3b013b93ea4a6766162c9f0c60517a7ffc993cf436ad3aeeae82ed3e330b07bd.json rename to core/lib/dal/.sqlx/query-8c763c05187a409a54806b0eb88e733f635b183960226848b280383042ea3637.json index 6e7bffec4854..930c1c1a9fed 100644 --- a/core/lib/dal/.sqlx/query-3b013b93ea4a6766162c9f0c60517a7ffc993cf436ad3aeeae82ed3e330b07bd.json +++ b/core/lib/dal/.sqlx/query-8c763c05187a409a54806b0eb88e733f635b183960226848b280383042ea3637.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n ORDER BY\n number ASC\n LIMIT\n 1\n ", + "query": "\n SELECT\n certificate\n FROM\n l1_batches_consensus\n WHERE\n l1_batch_number = $1\n ", "describe": { "columns": [ { @@ -10,11 +10,13 @@ } ], "parameters": { - "Left": [] + "Left": [ + "Int8" + ] }, "nullable": [ false ] }, - "hash": "3b013b93ea4a6766162c9f0c60517a7ffc993cf436ad3aeeae82ed3e330b07bd" + "hash": "8c763c05187a409a54806b0eb88e733f635b183960226848b280383042ea3637" } diff --git a/core/lib/dal/.sqlx/query-928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028.json b/core/lib/dal/.sqlx/query-928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028.json new file mode 100644 index 000000000000..e192763b189b --- /dev/null +++ b/core/lib/dal/.sqlx/query-928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND data_availability.blob_id IS NULL\n AND pubdata_input IS NOT NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "pubdata_input", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028" +} diff --git a/core/lib/dal/.sqlx/query-d6b70256793417a949081899eccf75260c7afaf110870656061a04079c35c2d8.json b/core/lib/dal/.sqlx/query-9f29aa31d4698031e9f3fe2eb273724dcce382936af0d4c386143399995cd325.json similarity index 87% rename from core/lib/dal/.sqlx/query-d6b70256793417a949081899eccf75260c7afaf110870656061a04079c35c2d8.json rename to core/lib/dal/.sqlx/query-9f29aa31d4698031e9f3fe2eb273724dcce382936af0d4c386143399995cd325.json index c116d2d7de6e..2e1bf7c3e61c 100644 --- a/core/lib/dal/.sqlx/query-d6b70256793417a949081899eccf75260c7afaf110870656061a04079c35c2d8.json +++ b/core/lib/dal/.sqlx/query-9f29aa31d4698031e9f3fe2eb273724dcce382936af0d4c386143399995cd325.json @@ -1,21 +1,21 @@ { "db_name": 
"PostgreSQL", - "query": "\n SELECT\n storage_logs.key AS \"key!\",\n storage_logs.value AS \"value!\",\n storage_logs.address AS \"address!\",\n storage_logs.miniblock_number AS \"miniblock_number!\",\n initial_writes.l1_batch_number AS \"l1_batch_number!\",\n initial_writes.index\n FROM\n (\n SELECT\n hashed_key,\n MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op\n FROM\n storage_logs\n WHERE\n miniblock_number <= $1\n AND hashed_key >= $3\n AND hashed_key <= $4\n GROUP BY\n hashed_key\n ORDER BY\n hashed_key\n ) AS keys\n INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key\n AND storage_logs.miniblock_number = keys.op[1]\n AND storage_logs.operation_number = keys.op[2]\n INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key\n WHERE\n initial_writes.l1_batch_number <= $2\n ", + "query": "\n SELECT\n storage_logs.address AS \"address!\",\n storage_logs.key AS \"key!\",\n storage_logs.value AS \"value!\",\n storage_logs.miniblock_number AS \"miniblock_number!\",\n initial_writes.l1_batch_number AS \"l1_batch_number!\",\n initial_writes.index\n FROM\n (\n SELECT\n hashed_key,\n MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op\n FROM\n storage_logs\n WHERE\n miniblock_number <= $1\n AND hashed_key >= $3\n AND hashed_key <= $4\n GROUP BY\n hashed_key\n ORDER BY\n hashed_key\n ) AS keys\n INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key\n AND storage_logs.miniblock_number = keys.op[1]\n AND storage_logs.operation_number = keys.op[2]\n INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key\n WHERE\n initial_writes.l1_batch_number <= $2\n ", "describe": { "columns": [ { "ordinal": 0, - "name": "key!", + "name": "address!", "type_info": "Bytea" }, { "ordinal": 1, - "name": "value!", + "name": "key!", "type_info": "Bytea" }, { "ordinal": 2, - "name": "address!", + "name": "value!", "type_info": "Bytea" }, { @@ -43,13 +43,13 @@ ] }, "nullable": [ - false, - false, + true, + true, false, false, false, false ] }, - "hash": "d6b70256793417a949081899eccf75260c7afaf110870656061a04079c35c2d8" + "hash": "9f29aa31d4698031e9f3fe2eb273724dcce382936af0d4c386143399995cd325" } diff --git a/core/lib/dal/.sqlx/query-9f2c06e6b14434ac4f3b556dc97994cc05ebeb4e5aeeaee50b7c4d8baf58ca44.json b/core/lib/dal/.sqlx/query-9f2c06e6b14434ac4f3b556dc97994cc05ebeb4e5aeeaee50b7c4d8baf58ca44.json new file mode 100644 index 000000000000..54f0d27bab26 --- /dev/null +++ b/core/lib/dal/.sqlx/query-9f2c06e6b14434ac4f3b556dc97994cc05ebeb4e5aeeaee50b7c4d8baf58ca44.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int4", + "Int8", + "ByteaArray", + "Bytea", + "ByteaArray", + "Int8", + "Int8", + "Int8", + "Jsonb", + "Jsonb", + "Bytea", + "Bytea", + "Int4", + "ByteaArray", + "Int8Array", + "Int8Array", + 
"Bytea", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "9f2c06e6b14434ac4f3b556dc97994cc05ebeb4e5aeeaee50b7c4d8baf58ca44" +} diff --git a/core/lib/dal/.sqlx/query-aab0254e6bf2c109d97e84053cb08f1ce816a56308fb9fe581b8683f76cbbbc3.json b/core/lib/dal/.sqlx/query-aab0254e6bf2c109d97e84053cb08f1ce816a56308fb9fe581b8683f76cbbbc3.json new file mode 100644 index 000000000000..850dfc675743 --- /dev/null +++ b/core/lib/dal/.sqlx/query-aab0254e6bf2c109d97e84053cb08f1ce816a56308fb9fe581b8683f76cbbbc3.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE vm_runner_bwip\n SET\n time_taken = NOW() - processing_started_at\n WHERE\n l1_batch_number = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "aab0254e6bf2c109d97e84053cb08f1ce816a56308fb9fe581b8683f76cbbbc3" +} diff --git a/core/lib/dal/.sqlx/query-b61b2545ff82bc3e2a198b21546735c1dcccdd6c439827fc4c3ba57e8767076e.json b/core/lib/dal/.sqlx/query-b61b2545ff82bc3e2a198b21546735c1dcccdd6c439827fc4c3ba57e8767076e.json new file mode 100644 index 000000000000..4f7101ed45e4 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b61b2545ff82bc3e2a198b21546735c1dcccdd6c439827fc4c3ba57e8767076e.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE proof_generation_details\n SET\n proof_gen_data_blob_url = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "b61b2545ff82bc3e2a198b21546735c1dcccdd6c439827fc4c3ba57e8767076e" +} diff --git a/core/lib/dal/.sqlx/query-5e5c279ed5f26c2465edf701fd7ecf7e45774cb5aa8b1d27bdecacc8de4956ea.json b/core/lib/dal/.sqlx/query-b7cd7c40282c2ca2287eef93ee79c69a9e494bf1f873291b4ae7bf68b7e3c549.json similarity index 67% rename from core/lib/dal/.sqlx/query-5e5c279ed5f26c2465edf701fd7ecf7e45774cb5aa8b1d27bdecacc8de4956ea.json rename to core/lib/dal/.sqlx/query-b7cd7c40282c2ca2287eef93ee79c69a9e494bf1f873291b4ae7bf68b7e3c549.json index 16ca5c2bc1a1..ed4744206a48 100644 --- a/core/lib/dal/.sqlx/query-5e5c279ed5f26c2465edf701fd7ecf7e45774cb5aa8b1d27bdecacc8de4956ea.json +++ b/core/lib/dal/.sqlx/query-b7cd7c40282c2ca2287eef93ee79c69a9e494bf1f873291b4ae7bf68b7e3c549.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n 
priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -45,96 +45,91 @@ }, { "ordinal": 8, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 10, + "ordinal": 9, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 11, + "ordinal": 10, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 11, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 13, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 14, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 16, + "ordinal": 15, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 19, + "ordinal": 18, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 20, + "ordinal": 19, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 22, + "ordinal": 21, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 24, + "ordinal": 23, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 25, "name": "pubdata_input", "type_info": "Bytea" } @@ -158,7 +153,6 @@ true, false, false, - false, true, true, true, @@ -177,5 +171,5 @@ true ] }, - "hash": "5e5c279ed5f26c2465edf701fd7ecf7e45774cb5aa8b1d27bdecacc8de4956ea" + "hash": "b7cd7c40282c2ca2287eef93ee79c69a9e494bf1f873291b4ae7bf68b7e3c549" } diff --git a/core/lib/dal/.sqlx/query-be092376ee3aec298f8b22229abf6552b86d46808fe219c55a5210af56cce2ee.json b/core/lib/dal/.sqlx/query-be092376ee3aec298f8b22229abf6552b86d46808fe219c55a5210af56cce2ee.json new file mode 100644 index 000000000000..82c544631339 --- /dev/null +++ b/core/lib/dal/.sqlx/query-be092376ee3aec298f8b22229abf6552b86d46808fe219c55a5210af56cce2ee.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n hashed_key,\n value\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN (\n 
SELECT\n MIN(number)\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n ) AND (\n SELECT\n MAX(number)\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n )\n ORDER BY\n miniblock_number,\n operation_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hashed_key", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "value", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "be092376ee3aec298f8b22229abf6552b86d46808fe219c55a5210af56cce2ee" +} diff --git a/core/lib/dal/.sqlx/query-c36abacc705a2244d423599779e38d60d6e93bcb34fd20422e227714fccbf6b7.json b/core/lib/dal/.sqlx/query-c36abacc705a2244d423599779e38d60d6e93bcb34fd20422e227714fccbf6b7.json deleted file mode 100644 index ea4b266d8259..000000000000 --- a/core/lib/dal/.sqlx/query-c36abacc705a2244d423599779e38d60d6e93bcb34fd20422e227714fccbf6b7.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n address,\n key,\n value\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN (\n SELECT\n MIN(number)\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n ) AND (\n SELECT\n MAX(number)\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n )\n ORDER BY\n miniblock_number,\n operation_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "address", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "key", - "type_info": "Bytea" - }, - { - "ordinal": 2, - "name": "value", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "c36abacc705a2244d423599779e38d60d6e93bcb34fd20422e227714fccbf6b7" -} diff --git a/core/lib/dal/.sqlx/query-dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b.json b/core/lib/dal/.sqlx/query-c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479.json similarity index 84% rename from core/lib/dal/.sqlx/query-dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b.json rename to core/lib/dal/.sqlx/query-c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479.json index 0ee5b247c330..1c15bde02fdf 100644 --- a/core/lib/dal/.sqlx/query-dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b.json +++ b/core/lib/dal/.sqlx/query-c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n events_select AS (\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n miniblock_number > $1\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n )\n SELECT\n miniblocks.hash AS \"block_hash?\",\n address AS \"address!\",\n topic1 AS \"topic1!\",\n topic2 AS \"topic2!\",\n topic3 AS \"topic3!\",\n topic4 AS \"topic4!\",\n value AS \"value!\",\n miniblock_number AS \"miniblock_number!\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n tx_hash AS \"tx_hash!\",\n tx_index_in_block AS \"tx_index_in_block!\",\n event_index_in_block AS \"event_index_in_block!\",\n event_index_in_tx AS \"event_index_in_tx!\"\n FROM\n events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", + "query": "\n WITH\n events_select AS (\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n miniblock_number,\n tx_hash,\n 
tx_index_in_block,\n event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n miniblock_number > $1\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n )\n SELECT\n miniblocks.hash AS \"block_hash?\",\n address AS \"address!\",\n topic1 AS \"topic1!\",\n topic2 AS \"topic2!\",\n topic3 AS \"topic3!\",\n topic4 AS \"topic4!\",\n value AS \"value!\",\n miniblock_number AS \"miniblock_number!\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n tx_hash AS \"tx_hash!\",\n tx_index_in_block AS \"tx_index_in_block!\",\n event_index_in_block AS \"event_index_in_block!\",\n event_index_in_tx AS \"event_index_in_tx!\",\n miniblocks.timestamp AS \"block_timestamp\"\n FROM\n events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", "describe": { "columns": [ { @@ -67,6 +67,11 @@ "ordinal": 12, "name": "event_index_in_tx!", "type_info": "Int4" + }, + { + "ordinal": 13, + "name": "block_timestamp", + "type_info": "Int8" } ], "parameters": { @@ -87,8 +92,9 @@ false, false, false, + false, false ] }, - "hash": "dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b" + "hash": "c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479" } diff --git a/core/lib/dal/.sqlx/query-c5aef75dbeb520c965a0996abed9713f437db492e2075ca69e11e2ef5728ccaa.json b/core/lib/dal/.sqlx/query-c5aef75dbeb520c965a0996abed9713f437db492e2075ca69e11e2ef5728ccaa.json new file mode 100644 index 000000000000..6dd2f6cc7a9f --- /dev/null +++ b/core/lib/dal/.sqlx/query-c5aef75dbeb520c965a0996abed9713f437db492e2075ca69e11e2ef5728ccaa.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n base_token_ratios (numerator, denominator, ratio_timestamp, created_at, updated_at)\n VALUES\n ($1, $2, $3, NOW(), NOW())\n RETURNING\n id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Numeric", + "Numeric", + "Timestamp" + ] + }, + "nullable": [ + false + ] + }, + "hash": "c5aef75dbeb520c965a0996abed9713f437db492e2075ca69e11e2ef5728ccaa" +} diff --git a/core/lib/dal/.sqlx/query-c731b37e17334619d42121e2740c312512dfab93fd8f32c94461b7a85e3a410e.json b/core/lib/dal/.sqlx/query-c731b37e17334619d42121e2740c312512dfab93fd8f32c94461b7a85e3a410e.json new file mode 100644 index 000000000000..d32a9867e304 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c731b37e17334619d42121e2740c312512dfab93fd8f32c94461b7a85e3a410e.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MAX(l1_batch_number) AS \"last_processed_l1_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_processed_l1_batch", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "c731b37e17334619d42121e2740c312512dfab93fd8f32c94461b7a85e3a410e" +} diff --git a/core/lib/dal/.sqlx/query-d3abe74360732659a1a35a176679411ba30ac67080552279d821d66b1b804db3.json b/core/lib/dal/.sqlx/query-d3abe74360732659a1a35a176679411ba30ac67080552279d821d66b1b804db3.json new file mode 100644 index 000000000000..2b5eeec2e638 --- /dev/null +++ b/core/lib/dal/.sqlx/query-d3abe74360732659a1a35a176679411ba30ac67080552279d821d66b1b804db3.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n vm_runner_protective_reads (l1_batch_number, created_at, updated_at, processing_started_at)\n VALUES\n ($1, 
NOW(), NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n updated_at = NOW(),\n processing_started_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "d3abe74360732659a1a35a176679411ba30ac67080552279d821d66b1b804db3" +} diff --git a/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json b/core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json similarity index 58% rename from core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json rename to core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json index c34d38ac2d03..61497cdb1694 100644 --- a/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json +++ b/core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n WHERE\n number >= $1\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -10,11 +10,13 @@ } ], "parameters": { - "Left": [] + "Left": [ + "Int8" + ] }, "nullable": [ false ] }, - "hash": "fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b" + "hash": "d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977" } diff --git a/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json b/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json new file mode 100644 index 000000000000..a42fbe98ff2f --- /dev/null +++ b/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510" +} diff --git a/core/lib/dal/.sqlx/query-daa330d43f150824f2195cdbfb96862d6ad0de7c7ca1d5320800f317428f07e1.json b/core/lib/dal/.sqlx/query-daa330d43f150824f2195cdbfb96862d6ad0de7c7ca1d5320800f317428f07e1.json new file mode 100644 index 000000000000..836bbc435f00 --- /dev/null +++ b/core/lib/dal/.sqlx/query-daa330d43f150824f2195cdbfb96862d6ad0de7c7ca1d5320800f317428f07e1.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM vm_runner_protective_reads\n WHERE\n l1_batch_number > $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "daa330d43f150824f2195cdbfb96862d6ad0de7c7ca1d5320800f317428f07e1" +} diff --git a/core/lib/dal/.sqlx/query-f7bbf329c045055d85811968552e4d38c6631b37c2894c2ff16449e7a2b0c7a2.json b/core/lib/dal/.sqlx/query-de255be5d2e5ef215428e9a886e7c9dc036873c60b8b916ce8c446e310447b66.json similarity index 72% rename from core/lib/dal/.sqlx/query-f7bbf329c045055d85811968552e4d38c6631b37c2894c2ff16449e7a2b0c7a2.json rename to core/lib/dal/.sqlx/query-de255be5d2e5ef215428e9a886e7c9dc036873c60b8b916ce8c446e310447b66.json index acb2c7d3bdc5..8a492376557b 100644 --- 
a/core/lib/dal/.sqlx/query-f7bbf329c045055d85811968552e4d38c6631b37c2894c2ff16449e7a2b0c7a2.json +++ b/core/lib/dal/.sqlx/query-de255be5d2e5ef215428e9a886e7c9dc036873c60b8b916ce8c446e310447b66.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -45,96 +45,91 @@ }, { "ordinal": 8, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 10, + "ordinal": 9, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 11, + "ordinal": 10, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 11, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 13, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 14, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 16, + "ordinal": 15, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 19, + "ordinal": 18, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 20, + "ordinal": 19, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 22, + "ordinal": 21, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 24, + "ordinal": 23, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 26, 
+ "ordinal": 25, "name": "pubdata_input", "type_info": "Bytea" } @@ -153,7 +148,6 @@ true, false, false, - false, true, true, true, @@ -172,5 +166,5 @@ true ] }, - "hash": "f7bbf329c045055d85811968552e4d38c6631b37c2894c2ff16449e7a2b0c7a2" + "hash": "de255be5d2e5ef215428e9a886e7c9dc036873c60b8b916ce8c446e310447b66" } diff --git a/core/lib/dal/.sqlx/query-decbf1c9c344253f692d0eae57323edbf31a923e7a45a431267e1bd9fc67b47b.json b/core/lib/dal/.sqlx/query-decbf1c9c344253f692d0eae57323edbf31a923e7a45a431267e1bd9fc67b47b.json deleted file mode 100644 index b2a1ae0eb956..000000000000 --- a/core/lib/dal/.sqlx/query-decbf1c9c344253f692d0eae57323edbf31a923e7a45a431267e1bd9fc67b47b.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n COALESCE(MAX(l1_batch_number), $1) AS \"last_processed_l1_batch!\"\n FROM\n vm_runner_protective_reads\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_processed_l1_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "decbf1c9c344253f692d0eae57323edbf31a923e7a45a431267e1bd9fc67b47b" -} diff --git a/core/lib/dal/.sqlx/query-d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338.json b/core/lib/dal/.sqlx/query-e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767.json similarity index 84% rename from core/lib/dal/.sqlx/query-d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338.json rename to core/lib/dal/.sqlx/query-e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767.json index 93934a3a0bed..de9937ef7b95 100644 --- a/core/lib/dal/.sqlx/query-d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338.json +++ b/core/lib/dal/.sqlx/query-e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n events AS (\n SELECT DISTINCT\n ON (events.tx_hash) *\n FROM\n events\n WHERE\n events.address = $1\n AND events.topic1 = $2\n AND events.tx_hash = ANY ($3)\n ORDER BY\n events.tx_hash,\n events.event_index_in_tx DESC\n )\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error AS error,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.initiator_address AS initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas AS refunded_gas,\n transactions.gas_limit AS gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n events.topic4 AS \"contract_address?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN events ON events.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY ($3)\n AND transactions.data != '{}'::jsonb\n ", + "query": "\n WITH\n events AS (\n SELECT DISTINCT\n ON (events.tx_hash) *\n FROM\n events\n WHERE\n events.address = $1\n AND events.topic1 = $2\n AND events.tx_hash = ANY ($3)\n ORDER BY\n events.tx_hash,\n events.event_index_in_tx DESC\n )\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error AS 
error,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.initiator_address AS initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas AS refunded_gas,\n transactions.gas_limit AS gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n events.topic4 AS \"contract_address?\",\n miniblocks.timestamp AS \"block_timestamp?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN events ON events.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY ($3)\n AND transactions.data != '{}'::jsonb\n ", "describe": { "columns": [ { @@ -77,6 +77,11 @@ "ordinal": 14, "name": "contract_address?", "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "block_timestamp?", + "type_info": "Int8" } ], "parameters": { @@ -101,8 +106,9 @@ true, false, true, - true + true, + false ] }, - "hash": "d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338" + "hash": "e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767" } diff --git a/core/lib/dal/.sqlx/query-0a2138a1cbf21546931867319ccbfe1e597151ecfaeb3cfa6624f2a1978ef23f.json b/core/lib/dal/.sqlx/query-e7d0b7c132b80195dae7cbf50355eb148aa6d1dbd69bf3fe48522101a6ea0bcb.json similarity index 64% rename from core/lib/dal/.sqlx/query-0a2138a1cbf21546931867319ccbfe1e597151ecfaeb3cfa6624f2a1978ef23f.json rename to core/lib/dal/.sqlx/query-e7d0b7c132b80195dae7cbf50355eb148aa6d1dbd69bf3fe48522101a6ea0bcb.json index eaef732751ec..576484cd4206 100644 --- a/core/lib/dal/.sqlx/query-0a2138a1cbf21546931867319ccbfe1e597151ecfaeb3cfa6624f2a1978ef23f.json +++ b/core/lib/dal/.sqlx/query-e7d0b7c132b80195dae7cbf50355eb148aa6d1dbd69bf3fe48522101a6ea0bcb.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n )\n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n )\n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", "describe": { "columns": [ { @@ -19,5 +19,5 @@ true ] }, - "hash": "0a2138a1cbf21546931867319ccbfe1e597151ecfaeb3cfa6624f2a1978ef23f" + "hash": "e7d0b7c132b80195dae7cbf50355eb148aa6d1dbd69bf3fe48522101a6ea0bcb" } diff --git a/core/lib/dal/.sqlx/query-6ad9adcbd60483148983a495d0e9b5c09854efaa4c0a35466b138587dce03f25.json b/core/lib/dal/.sqlx/query-ead71ae66fe4685132c03a973612fe98364aa684180dd6fbf540bb0b68d96a64.json similarity index 74% rename from core/lib/dal/.sqlx/query-6ad9adcbd60483148983a495d0e9b5c09854efaa4c0a35466b138587dce03f25.json rename to core/lib/dal/.sqlx/query-ead71ae66fe4685132c03a973612fe98364aa684180dd6fbf540bb0b68d96a64.json index 93d1966f370c..02cd6733e811 100644 --- a/core/lib/dal/.sqlx/query-6ad9adcbd60483148983a495d0e9b5c09854efaa4c0a35466b138587dce03f25.json +++ 
b/core/lib/dal/.sqlx/query-ead71ae66fe4685132c03a973612fe98364aa684180dd6fbf540bb0b68d96a64.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n new_logs AS MATERIALIZED (\n SELECT DISTINCT\n ON (hashed_key) hashed_key,\n miniblock_number,\n operation_number\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ORDER BY\n hashed_key,\n miniblock_number DESC,\n operation_number DESC\n )\n DELETE FROM storage_logs USING new_logs\n WHERE\n storage_logs.hashed_key = new_logs.hashed_key\n AND (storage_logs.miniblock_number, storage_logs.operation_number) < (new_logs.miniblock_number, new_logs.operation_number)\n ", + "query": "\n WITH\n new_logs AS MATERIALIZED (\n SELECT DISTINCT\n ON (hashed_key) hashed_key,\n miniblock_number,\n operation_number\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ORDER BY\n hashed_key,\n miniblock_number DESC,\n operation_number DESC\n )\n DELETE FROM storage_logs USING new_logs\n WHERE\n storage_logs.hashed_key = new_logs.hashed_key\n AND storage_logs.miniblock_number <= $2\n AND (storage_logs.miniblock_number, storage_logs.operation_number) < (new_logs.miniblock_number, new_logs.operation_number)\n ", "describe": { "columns": [], "parameters": { @@ -11,5 +11,5 @@ }, "nullable": [] }, - "hash": "6ad9adcbd60483148983a495d0e9b5c09854efaa4c0a35466b138587dce03f25" + "hash": "ead71ae66fe4685132c03a973612fe98364aa684180dd6fbf540bb0b68d96a64" } diff --git a/core/lib/dal/.sqlx/query-f2f1b6c4f4686b423a4c449c56e1687e91d8367347b3830830a4c76407d60bc5.json b/core/lib/dal/.sqlx/query-f2f1b6c4f4686b423a4c449c56e1687e91d8367347b3830830a4c76407d60bc5.json deleted file mode 100644 index e49cc211cdcd..000000000000 --- a/core/lib/dal/.sqlx/query-f2f1b6c4f4686b423a4c449c56e1687e91d8367347b3830830a4c76407d60bc5.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n vm_runner_protective_reads (l1_batch_number, created_at, updated_at)\n VALUES\n ($1, NOW(), NOW())\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "f2f1b6c4f4686b423a4c449c56e1687e91d8367347b3830830a4c76407d60bc5" -} diff --git a/core/lib/dal/.sqlx/query-f6c0b212fad536f46863ce3a6105249bd77b263c4fcef81689f9dcd155064a36.json b/core/lib/dal/.sqlx/query-f6c0b212fad536f46863ce3a6105249bd77b263c4fcef81689f9dcd155064a36.json new file mode 100644 index 000000000000..f916d0dddcef --- /dev/null +++ b/core/lib/dal/.sqlx/query-f6c0b212fad536f46863ce3a6105249bd77b263c4fcef81689f9dcd155064a36.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id = $1\n OR eth_prove_tx_id = $1\n OR eth_execute_tx_id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "timestamp", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int4" + ] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "f6c0b212fad536f46863ce3a6105249bd77b263c4fcef81689f9dcd155064a36" +} diff --git a/core/lib/dal/.sqlx/query-fe501f86f4bf6c5b8ccc2e039a4eb09b538a67d1c39fda052c4f4ddb23ce0084.json b/core/lib/dal/.sqlx/query-fe501f86f4bf6c5b8ccc2e039a4eb09b538a67d1c39fda052c4f4ddb23ce0084.json deleted file mode 100644 index 
5573cdd99530..000000000000 --- a/core/lib/dal/.sqlx/query-fe501f86f4bf6c5b8ccc2e039a4eb09b538a67d1c39fda052c4f4ddb23ce0084.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n l2_to_l1_logs\n FROM\n l1_batches\n WHERE\n number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "fe501f86f4bf6c5b8ccc2e039a4eb09b538a67d1c39fda052c4f4ddb23ce0084" -} diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index aa1d7097b9ba..c046b3d3b425 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_dal" -version = "0.1.0" +description = "ZKsync data access layer" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/lib/dal/README.md b/core/lib/dal/README.md index 59f4401924ee..cc247733467d 100644 --- a/core/lib/dal/README.md +++ b/core/lib/dal/README.md @@ -83,6 +83,12 @@ invariants are expected to be upheld: - L2 blocks and L1 batches present in the DB form a continuous range of numbers. If a DB is recovered from a node snapshot, the first L2 block / L1 batch is **the next one** after the snapshot L2 block / L1 batch mentioned in the `snapshot_recovery` table. Otherwise, L2 blocks / L1 batches must start from number 0 (aka genesis). +- `address` and `key` fields in the `storage_logs` table are not null for all blocks executed on the node (i.e., blocks + whose header is present in `miniblocks`). On the other hand, `address` and `key` may be null for snapshot storage + logs. These fields are needed by components that post-process L1 batches, such as the Merkle tree and the commitment + generator; both use `(address, key)` tuples to sort the logs in a batch into a canonical order. Since a snapshot is + never post-processed this way, it is acceptable for these fields to be null in snapshot logs (and only in them).
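To make the new invariant concrete, here is a hedged sketch of a consistency check a DAL test might run. It is hypothetical and not part of the crate; it only assumes the `storage_logs`/`miniblocks` columns named in the README bullet and a raw `sqlx` Postgres connection handle (`conn`):

```rust
// Hypothetical invariant check (illustration only): storage logs belonging to
// an executed block -- one whose header is present in `miniblocks` -- must have
// `address` and `key` set. Only snapshot logs may leave them NULL.
let row = sqlx::query!(
    r#"
    SELECT
        COUNT(*) AS "count!"
    FROM
        storage_logs
    JOIN miniblocks ON miniblocks.number = storage_logs.miniblock_number
    WHERE
        storage_logs.address IS NULL
        OR storage_logs.key IS NULL
    "#
)
.fetch_one(conn)
.await?;
assert_eq!(row.count, 0, "executed blocks must have `address`/`key` set");
```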
## Contributing to DAL diff --git a/core/lib/dal/migrations/20240522081114_create_data_availability_table.down.sql b/core/lib/dal/migrations/20240522081114_create_data_availability_table.down.sql new file mode 100644 index 000000000000..b6993d850ea5 --- /dev/null +++ b/core/lib/dal/migrations/20240522081114_create_data_availability_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS data_availability; diff --git a/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql b/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql new file mode 100644 index 000000000000..037398021da6 --- /dev/null +++ b/core/lib/dal/migrations/20240522081114_create_data_availability_table.up.sql @@ -0,0 +1,13 @@ +CREATE TABLE IF NOT EXISTS data_availability +( + l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, + + blob_id TEXT NOT NULL, -- blob here is an abstract term, unrelated to any DA implementation + -- BYTEA is used for the inclusion_data column as it is the most generic type + -- the actual format of the blob identifier and inclusion data is defined by the DA client implementation + inclusion_data BYTEA, + sent_at TIMESTAMP NOT NULL, + + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL +); diff --git a/core/lib/dal/migrations/20240611121747_add_base_token_ratio_table.down.sql b/core/lib/dal/migrations/20240611121747_add_base_token_ratio_table.down.sql new file mode 100644 index 000000000000..e64cb3c7c408 --- /dev/null +++ b/core/lib/dal/migrations/20240611121747_add_base_token_ratio_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS base_token_ratios; diff --git a/core/lib/dal/migrations/20240611121747_add_base_token_ratio_table.up.sql b/core/lib/dal/migrations/20240611121747_add_base_token_ratio_table.up.sql new file mode 100644 index 000000000000..f4853e352802 --- /dev/null +++ b/core/lib/dal/migrations/20240611121747_add_base_token_ratio_table.up.sql @@ -0,0 +1,11 @@ +CREATE TABLE base_token_ratios ( + id SERIAL PRIMARY KEY, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + + ratio_timestamp TIMESTAMP NOT NULL, + numerator NUMERIC(20,0) NOT NULL, + denominator NUMERIC(20,0) NOT NULL, + + used_in_l1 BOOLEAN NOT NULL DEFAULT FALSE +); diff --git a/core/lib/dal/migrations/20240617103351_make_key_preimage_nullable.down.sql b/core/lib/dal/migrations/20240617103351_make_key_preimage_nullable.down.sql new file mode 100644 index 000000000000..4348c29caef5 --- /dev/null +++ b/core/lib/dal/migrations/20240617103351_make_key_preimage_nullable.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE storage_logs + ALTER COLUMN address SET NOT NULL, + ALTER COLUMN key SET NOT NULL; diff --git a/core/lib/dal/migrations/20240617103351_make_key_preimage_nullable.up.sql b/core/lib/dal/migrations/20240617103351_make_key_preimage_nullable.up.sql new file mode 100644 index 000000000000..18a623c67f56 --- /dev/null +++ b/core/lib/dal/migrations/20240617103351_make_key_preimage_nullable.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE storage_logs + ALTER COLUMN address DROP NOT NULL, + ALTER COLUMN key DROP NOT NULL; diff --git a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql new file mode 100644 index 000000000000..1f86ba3bb696 --- /dev/null +++ b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE proof_generation_details DROP COLUMN IF EXISTS vm_run_data_blob_url; +DROP TABLE IF EXISTS
vm_runner_bwip; diff --git a/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql new file mode 100644 index 000000000000..1fe90c191411 --- /dev/null +++ b/core/lib/dal/migrations/20240619143458_add_vm_run_data_blob_url_column.up.sql @@ -0,0 +1,10 @@ +ALTER TABLE proof_generation_details + ADD COLUMN IF NOT EXISTS vm_run_data_blob_url TEXT DEFAULT NULL; + +CREATE TABLE IF NOT EXISTS vm_runner_bwip +( + l1_batch_number BIGINT NOT NULL PRIMARY KEY, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + time_taken TIME +); diff --git a/core/lib/dal/migrations/20240627142548_l1_batches_consensus.down.sql b/core/lib/dal/migrations/20240627142548_l1_batches_consensus.down.sql new file mode 100644 index 000000000000..45114088eaa8 --- /dev/null +++ b/core/lib/dal/migrations/20240627142548_l1_batches_consensus.down.sql @@ -0,0 +1 @@ +DROP TABLE l1_batches_consensus; diff --git a/core/lib/dal/migrations/20240627142548_l1_batches_consensus.up.sql b/core/lib/dal/migrations/20240627142548_l1_batches_consensus.up.sql new file mode 100644 index 000000000000..71c3854d640e --- /dev/null +++ b/core/lib/dal/migrations/20240627142548_l1_batches_consensus.up.sql @@ -0,0 +1,9 @@ +CREATE TABLE l1_batches_consensus ( + l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, + certificate JSONB NOT NULL, + + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + + CHECK((certificate->'message'->'number')::jsonb::numeric = l1_batch_number) +); diff --git a/core/lib/dal/migrations/20240705164305_protective_reads_add_processing_started_at.down.sql b/core/lib/dal/migrations/20240705164305_protective_reads_add_processing_started_at.down.sql new file mode 100644 index 000000000000..3e13998726f7 --- /dev/null +++ b/core/lib/dal/migrations/20240705164305_protective_reads_add_processing_started_at.down.sql @@ -0,0 +1 @@ +ALTER TABLE vm_runner_protective_reads DROP COLUMN IF EXISTS processing_started_at; diff --git a/core/lib/dal/migrations/20240705164305_protective_reads_add_processing_started_at.up.sql b/core/lib/dal/migrations/20240705164305_protective_reads_add_processing_started_at.up.sql new file mode 100644 index 000000000000..e44b16cae441 --- /dev/null +++ b/core/lib/dal/migrations/20240705164305_protective_reads_add_processing_started_at.up.sql @@ -0,0 +1 @@ +ALTER TABLE vm_runner_protective_reads ADD COLUMN IF NOT EXISTS processing_started_at TIME; diff --git a/core/lib/dal/migrations/20240708152005_bwip_add_processing_started_at.down.sql b/core/lib/dal/migrations/20240708152005_bwip_add_processing_started_at.down.sql new file mode 100644 index 000000000000..86bd163acbc4 --- /dev/null +++ b/core/lib/dal/migrations/20240708152005_bwip_add_processing_started_at.down.sql @@ -0,0 +1 @@ +ALTER TABLE vm_runner_bwip DROP COLUMN IF EXISTS processing_started_at; diff --git a/core/lib/dal/migrations/20240708152005_bwip_add_processing_started_at.up.sql b/core/lib/dal/migrations/20240708152005_bwip_add_processing_started_at.up.sql new file mode 100644 index 000000000000..244e53b1b8c6 --- /dev/null +++ b/core/lib/dal/migrations/20240708152005_bwip_add_processing_started_at.up.sql @@ -0,0 +1 @@ +ALTER TABLE vm_runner_bwip ADD COLUMN IF NOT EXISTS processing_started_at TIME; diff --git a/core/lib/dal/migrations/20240708161016_remove-not-null-proof-gen-data-constraint.down.sql b/core/lib/dal/migrations/20240708161016_remove-not-null-proof-gen-data-constraint.down.sql new 
file mode 100644 index 000000000000..c92ecac92618 --- /dev/null +++ b/core/lib/dal/migrations/20240708161016_remove-not-null-proof-gen-data-constraint.down.sql @@ -0,0 +1 @@ +ALTER TABLE proof_generation_details ALTER COLUMN proof_gen_data_blob_url SET NOT NULL; diff --git a/core/lib/dal/migrations/20240708161016_remove-not-null-proof-gen-data-constraint.up.sql b/core/lib/dal/migrations/20240708161016_remove-not-null-proof-gen-data-constraint.up.sql new file mode 100644 index 000000000000..8604cec1b689 --- /dev/null +++ b/core/lib/dal/migrations/20240708161016_remove-not-null-proof-gen-data-constraint.up.sql @@ -0,0 +1 @@ +ALTER TABLE proof_generation_details ALTER COLUMN proof_gen_data_blob_url DROP NOT NULL; diff --git a/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.down.sql b/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.down.sql new file mode 100644 index 000000000000..9e957f700f43 --- /dev/null +++ b/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE vm_runner_protective_reads ALTER COLUMN processing_started_at TYPE TIME USING (null); +ALTER TABLE vm_runner_bwip ALTER COLUMN processing_started_at TYPE TIME USING (null); diff --git a/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.up.sql b/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.up.sql new file mode 100644 index 000000000000..0afcdfe5aecf --- /dev/null +++ b/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE vm_runner_protective_reads ALTER COLUMN processing_started_at TYPE TIMESTAMP USING (null); +ALTER TABLE vm_runner_bwip ALTER COLUMN processing_started_at TYPE TIMESTAMP USING (null); diff --git a/core/lib/dal/src/base_token_dal.rs b/core/lib/dal/src/base_token_dal.rs new file mode 100644 index 000000000000..a8bf51d0c603 --- /dev/null +++ b/core/lib/dal/src/base_token_dal.rs @@ -0,0 +1,61 @@ +use std::num::NonZeroU64; + +use bigdecimal::{BigDecimal, FromPrimitive}; +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; +use zksync_types::base_token_ratio::BaseTokenRatio; + +use crate::{models::storage_base_token_ratio::StorageBaseTokenRatio, Core}; + +#[derive(Debug)] +pub struct BaseTokenDal<'a, 'c> { + pub(crate) storage: &'a mut Connection<'c, Core>, +} + +impl BaseTokenDal<'_, '_> { + pub async fn insert_token_ratio( + &mut self, + numerator: NonZeroU64, + denominator: NonZeroU64, + ratio_timestamp: &chrono::NaiveDateTime, + ) -> DalResult<usize> { + let row = sqlx::query!( + r#" + INSERT INTO + base_token_ratios (numerator, denominator, ratio_timestamp, created_at, updated_at) + VALUES + ($1, $2, $3, NOW(), NOW()) + RETURNING + id + "#, + BigDecimal::from_u64(numerator.get()), + BigDecimal::from_u64(denominator.get()), + ratio_timestamp, + ) + .instrument("insert_token_ratio") + .fetch_one(self.storage) + .await?; + + Ok(row.id as usize) + } + + pub async fn get_latest_ratio(&mut self) -> DalResult<Option<BaseTokenRatio>> { + let row = sqlx::query_as!( + StorageBaseTokenRatio, + r#" + SELECT + * + FROM + base_token_ratios + ORDER BY + ratio_timestamp DESC + LIMIT + 1 + "#, + ) + .instrument("get_latest_ratio") + .fetch_optional(self.storage) + .await?; + + Ok(row.map(|r| r.into())) + } +} diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 94d3b3372d93..4f4b3e99ff7b 100644 --- a/core/lib/dal/src/blocks_dal.rs
+++ b/core/lib/dal/src/blocks_dal.rs @@ -15,9 +15,13 @@ use zksync_db_connection::{ }; use zksync_types::{ aggregated_operations::AggregatedActionType, - block::{BlockGasCount, L1BatchHeader, L1BatchTreeData, L2BlockHeader, StorageOracleInfo}, + block::{ + BlockGasCount, L1BatchHeader, L1BatchStatistics, L1BatchTreeData, L2BlockHeader, + StorageOracleInfo, + }, circuit::CircuitStatistic, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, + l2_to_l1_log::UserL2ToL1Log, writes::TreeWrite, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, }; @@ -27,6 +31,7 @@ use crate::{ models::{ parse_protocol_version, storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageL2BlockHeader}, + storage_event::StorageL2ToL1Log, storage_oracle_info::DbStorageOracleInfo, }, Core, CoreDal, @@ -102,7 +107,7 @@ impl BlocksDal<'_, '_> { l1_batches "# ) - .instrument("get_sealed_block_number") + .instrument("get_sealed_l1_batch_number") .report_latency() .fetch_one(self.storage) .await?; @@ -158,7 +163,7 @@ impl BlocksDal<'_, '_> { hash IS NOT NULL "# ) - .instrument("get_last_block_number_with_tree_data") + .instrument("get_last_l1_batch_number_with_tree_data") .report_latency() .fetch_one(self.storage) .await?; @@ -245,28 +250,17 @@ impl BlocksDal<'_, '_> { Ok(row.number.map(|num| L1BatchNumber(num as u32))) } - pub async fn get_l1_batches_for_eth_tx_id( + pub async fn get_l1_batches_statistics_for_eth_tx_id( &mut self, eth_tx_id: u32, - ) -> DalResult<Vec<L1BatchHeader>> { - let l1_batches = sqlx::query_as!( - StorageL1BatchHeader, + ) -> DalResult<Vec<L1BatchStatistics>> { + Ok(sqlx::query!( r#" SELECT number, l1_tx_count, l2_tx_count, - timestamp, - l2_to_l1_logs, - l2_to_l1_messages, - bloom, - priority_ops_onchain_data, - used_contract_hashes, - bootloader_code_hash, - default_aa_code_hash, - protocol_version, - system_logs, - pubdata_input + timestamp FROM l1_batches WHERE @@ -276,12 +270,18 @@ impl BlocksDal<'_, '_> { "#, eth_tx_id as i32 ) - .instrument("get_l1_batches_for_eth_tx_id") + .instrument("get_l1_batch_statistics_for_eth_tx_id") .with_arg("eth_tx_id", &eth_tx_id) .fetch_all(self.storage) - .await?; - - Ok(l1_batches.into_iter().map(Into::into).collect()) + .await? + .into_iter() + .map(|row| L1BatchStatistics { + number: L1BatchNumber(row.number as u32), + timestamp: row.timestamp as u64, + l2_tx_count: row.l2_tx_count as u32, + l1_tx_count: row.l1_tx_count as u32, + }) + .collect()) } async fn get_storage_l1_batch( @@ -300,7 +300,6 @@ impl BlocksDal<'_, '_> { priority_ops_onchain_data, hash, commitment, - l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, @@ -337,7 +336,7 @@ impl BlocksDal<'_, '_> { &mut self, number: L1BatchNumber, ) -> DalResult<Option<L1BatchHeader>> { - Ok(sqlx::query_as!( + let storage_l1_batch_header = sqlx::query_as!( StorageL1BatchHeader, r#" SELECT @@ -345,7 +344,6 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, timestamp, - l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, @@ -365,8 +363,18 @@ impl BlocksDal<'_, '_> { .instrument("get_l1_batch_header") .with_arg("number", &number) .fetch_optional(self.storage) - .await? - .map(Into::into)) + .await?; + + if let Some(storage_l1_batch_header) = storage_l1_batch_header { + let l2_to_l1_logs = self + .get_l2_to_l1_logs_for_batch::<UserL2ToL1Log>(number) + .await?; + return Ok(Some( + storage_l1_batch_header.into_l1_batch_header_with_logs(l2_to_l1_logs), + )); + } + + Ok(None) } /// Returns initial bootloader heap content for the specified L1 batch.
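A hedged usage sketch of the slimmed-down statistics getter introduced in the hunk above; pool setup is assumed, and the field names follow the `L1BatchStatistics` mapping shown there:

```rust
use zksync_dal::{ConnectionPool, Core, CoreDal};

// Sketch: report per-batch stats for all L1 batches tied to one eth_sender tx.
// Unlike the old `L1BatchHeader`-based getter, no logs, bloom filter, or system
// contract hashes are fetched -- only the four statistic columns.
async fn report_batches(pool: &ConnectionPool<Core>, eth_tx_id: u32) -> anyhow::Result<()> {
    let mut conn = pool.connection().await?;
    let stats = conn
        .blocks_dal()
        .get_l1_batches_statistics_for_eth_tx_id(eth_tx_id)
        .await?;
    for batch in stats {
        println!(
            "batch #{}: ts={}, l1_txs={}, l2_txs={}",
            batch.number, batch.timestamp, batch.l1_tx_count, batch.l2_tx_count
        );
    }
    Ok(())
}
```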
@@ -555,11 +563,6 @@ impl BlocksDal<'_, '_> { .iter() .map(|data| data.clone().into()) .collect(); - let l2_to_l1_logs: Vec<_> = header - .l2_to_l1_logs - .iter() - .map(|log| log.0.to_bytes().to_vec()) - .collect(); let system_logs = header .system_logs .iter() @@ -581,7 +584,6 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, timestamp, - l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, @@ -623,7 +625,6 @@ impl BlocksDal<'_, '_> { $18, $19, $20, - $21, NOW(), NOW() ) @@ -632,7 +633,6 @@ impl BlocksDal<'_, '_> { i32::from(header.l1_tx_count), i32::from(header.l2_tx_count), header.timestamp as i64, - &l2_to_l1_logs, &header.l2_to_l1_messages, header.bloom.as_bytes(), &priority_onchain_data, @@ -988,8 +988,8 @@ impl BlocksDal<'_, '_> { pub async fn get_last_committed_to_eth_l1_batch( &mut self, ) -> DalResult<Option<L1BatchWithMetadata>> { - // We can get 0 block for the first transaction - let block = sqlx::query_as!( + // We can get 0 batch for the first transaction + let batch = sqlx::query_as!( StorageL1Batch, r#" SELECT @@ -1001,7 +1001,6 @@ priority_ops_onchain_data, hash, commitment, - l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, @@ -1036,12 +1035,12 @@ .instrument("get_last_committed_to_eth_l1_batch") .fetch_one(self.storage) .await?; - // genesis block is first generated without commitment, we should wait for the tree to set it. - if block.commitment.is_none() { + // genesis batch is first generated without commitment, we should wait for the tree to set it. + if batch.commitment.is_none() { return Ok(None); } - self.map_storage_l1_batch(block).await + self.map_storage_l1_batch(batch).await } /// Returns the number of the last L1 batch for which an Ethereum commit tx was sent and confirmed. @@ -1182,7 +1181,6 @@ priority_ops_onchain_data, hash, commitment, - l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, @@ -1228,16 +1226,16 @@ &mut self, raw_batches: Vec<StorageL1Batch>, ) -> anyhow::Result<Vec<L1BatchWithMetadata>> { - let mut l1_batches = Vec::with_capacity(raw_batches.len()); + let mut l1_batches_with_metadata = Vec::with_capacity(raw_batches.len()); for raw_batch in raw_batches { - let block = self + let batch = self .map_storage_l1_batch(raw_batch) .await - .context("get_l1_batch_with_metadata()")? - .context("Block should be complete")?; - l1_batches.push(block); + .context("map_storage_l1_batch()")? + .context("Batch should be complete")?; + l1_batches_with_metadata.push(batch); } - Ok(l1_batches) + Ok(l1_batches_with_metadata) } /// This method returns batches that are committed on L1 and witness jobs for them are skipped. @@ -1245,12 +1243,12 @@ &mut self, limit: usize, ) -> anyhow::Result<Vec<L1BatchWithMetadata>> { - let last_proved_block_number = self + let last_proved_batch_number = self .get_last_l1_batch_with_prove_tx() .await .context("get_last_l1_batch_with_prove_tx()")?; // Witness jobs can be processed out of order, so `WHERE l1_batches.number - row_number = $1` - // is used to avoid having gaps in the list of blocks to send dummy proofs for. + // is used to avoid having gaps in the list of batches to send dummy proofs for.
let raw_batches = sqlx::query_as!( StorageL1Batch, r#" @@ -1263,7 +1261,6 @@ priority_ops_onchain_data, hash, commitment, - l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, @@ -1305,7 +1302,7 @@ WHERE number - ROW_NUMBER = $1 "#, - last_proved_block_number.0 as i32, + last_proved_batch_number.0 as i32, limit as i32 ) .instrument("get_skipped_for_proof_l1_batches") @@ -1337,7 +1334,6 @@ priority_ops_onchain_data, hash, commitment, - l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, @@ -1447,10 +1443,10 @@ .fetch_one(self.storage.conn()) .await?; - Ok(if let Some(max_ready_to_send_block) = row.max { - // If we found at least one ready to execute batch then we can simply return all blocks between - // the expected started point and the max ready to send block because we send them to the L1 sequentially. - assert!(max_ready_to_send_block >= expected_started_point); + Ok(if let Some(max_ready_to_send_batch) = row.max { + // If we found at least one ready to execute batch then we can simply return all batches between + // the expected started point and the max ready to send batch because we send them to the L1 sequentially. + assert!(max_ready_to_send_batch >= expected_started_point); sqlx::query_as!( StorageL1Batch, r#" @@ -1463,7 +1459,6 @@ priority_ops_onchain_data, hash, commitment, - l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, @@ -1493,13 +1488,13 @@ $3 "#, expected_started_point as i32, - max_ready_to_send_block, + max_ready_to_send_batch, limit as i32, ) .instrument("get_ready_for_execute_l1_batches") .with_arg( "numbers", - &(expected_started_point..=max_ready_to_send_block), + &(expected_started_point..=max_ready_to_send_batch), ) .with_arg("limit", &limit) .fetch_all(self.storage) @@ -1528,7 +1523,6 @@ priority_ops_onchain_data, hash, commitment, - l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, @@ -1584,12 +1578,16 @@ .context("map_l1_batches()") } + /// When `with_da_inclusion_info` is true, only batches for which custom DA inclusion + /// information has already been provided will be included pub async fn get_ready_for_commit_l1_batches( &mut self, limit: usize, bootloader_hash: H256, default_aa_hash: H256, protocol_version_id: ProtocolVersionId, + + with_da_inclusion_info: bool, ) -> anyhow::Result<Vec<L1BatchWithMetadata>> { let raw_batches = sqlx::query_as!( StorageL1Batch, @@ -1603,7 +1601,6 @@ priority_ops_onchain_data, hash, commitment, - l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, @@ -1625,6 +1622,7 @@ FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version WHERE eth_commit_tx_id IS NULL @@ -1638,14 +1636,19 @@ ) AND events_queue_commitment IS NOT NULL AND bootloader_initial_content_commitment IS NOT NULL + AND ( + data_availability.inclusion_data IS NOT NULL + OR $4 IS FALSE + ) ORDER BY number LIMIT - $4 + $5 "#, bootloader_hash.as_bytes(), default_aa_hash.as_bytes(), protocol_version_id as i32, + with_da_inclusion_info, limit as i64, )
.instrument("get_ready_for_commit_l1_batches") @@ -1653,6 +1656,7 @@ impl BlocksDal<'_, '_> { .with_arg("bootloader_hash", &bootloader_hash) .with_arg("default_aa_hash", &default_aa_hash) .with_arg("protocol_version_id", &protocol_version_id) + .with_arg("with_da_inclusion_info", &with_da_inclusion_info) .fetch_all(self.storage) .await?; @@ -1732,8 +1736,14 @@ impl BlocksDal<'_, '_> { let Some(l1_batch) = self.get_storage_l1_batch(number).await? else { return Ok(None); }; + + let l2_to_l1_logs = self + .get_l2_to_l1_logs_for_batch::(number) + .await?; Ok(Some(L1BatchWithOptionalMetadata { - header: l1_batch.clone().into(), + header: l1_batch + .clone() + .into_l1_batch_header_with_logs(l2_to_l1_logs), metadata: l1_batch.try_into(), })) } @@ -1774,10 +1784,19 @@ impl BlocksDal<'_, '_> { let unsorted_factory_deps = self .get_l1_batch_factory_deps(L1BatchNumber(storage_batch.number as u32)) .await?; - let header: L1BatchHeader = storage_batch.clone().into(); - let Ok(metadata) = storage_batch.try_into() else { + + let l2_to_l1_logs = self + .get_l2_to_l1_logs_for_batch::(L1BatchNumber( + storage_batch.number as u32, + )) + .await?; + + let Ok(metadata) = storage_batch.clone().try_into() else { return Ok(None); }; + + let header: L1BatchHeader = storage_batch.into_l1_batch_header_with_logs(l2_to_l1_logs); + let raw_published_bytecode_hashes = self .storage .events_dal() @@ -2273,6 +2292,48 @@ impl BlocksDal<'_, '_> { .map(|row| row.tree_writes_are_present) .unwrap_or(false)) } + + pub(crate) async fn get_l2_to_l1_logs_for_batch( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> DalResult> + where + L: From, + { + let results = sqlx::query_as!( + StorageL2ToL1Log, + r#" + SELECT + miniblock_number, + log_index_in_miniblock, + log_index_in_tx, + tx_hash, + l1_batch_number, + shard_id, + is_service, + tx_index_in_miniblock, + tx_index_in_l1_batch, + sender, + key, + value + FROM + l2_to_l1_logs + JOIN miniblocks ON l2_to_l1_logs.miniblock_number = miniblocks.number + WHERE + l1_batch_number = $1 + ORDER BY + miniblock_number, + log_index_in_miniblock + "#, + i64::from(l1_batch_number.0) + ) + .instrument("get_l2_to_l1_logs_by_number") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_all(self.storage) + .await?; + + Ok(results.into_iter().map(L::from).collect()) + } } /// These methods should only be used for tests. 
@@ -2360,13 +2421,13 @@ #[cfg(test)] mod tests { use zksync_contracts::BaseSystemContractsHashes; - use zksync_types::{ - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Address, ProtocolVersion, ProtocolVersionId, - }; + use zksync_types::{tx::IncludedTxLocation, Address, ProtocolVersion, ProtocolVersionId}; use super::*; - use crate::{ConnectionPool, Core, CoreDal}; + use crate::{ + tests::{create_l1_batch_header, create_l2_block_header, create_l2_to_l1_log}, + ConnectionPool, Core, CoreDal, + }; async fn save_mock_eth_tx(action_type: AggregatedActionType, conn: &mut Connection<'_, Core>) { conn.eth_sender_dal() @@ -2376,31 +2437,23 @@ mod tests { } fn mock_l1_batch_header() -> L1BatchHeader { - let mut header = L1BatchHeader::new( - L1BatchNumber(1), - 100, - BaseSystemContractsHashes { - bootloader: H256::repeat_byte(1), - default_aa: H256::repeat_byte(42), - }, - ProtocolVersionId::latest(), - ); + let mut header = create_l1_batch_header(1); header.l1_tx_count = 3; header.l2_tx_count = 5; - header.l2_to_l1_logs.push(UserL2ToL1Log(L2ToL1Log { - shard_id: 0, - is_service: false, - tx_number_in_block: 2, - sender: Address::repeat_byte(2), - key: H256::repeat_byte(3), - value: H256::zero(), - })); + header.l2_to_l1_logs.push(create_l2_to_l1_log(0, 0)); header.l2_to_l1_messages.push(vec![22; 22]); header.l2_to_l1_messages.push(vec![33; 33]); header } + async fn insert_mock_l1_batch_header(conn: &mut Connection<'_, Core>, header: &L1BatchHeader) { + conn.blocks_dal() + .insert_mock_l1_batch(header) + .await + .unwrap(); + } + #[tokio::test] async fn set_tx_id_works_correctly() { let pool = ConnectionPool::<Core>::test_pool().await; @@ -2411,10 +2464,9 @@ .await .unwrap(); - conn.blocks_dal() - .insert_mock_l1_batch(&mock_l1_batch_header()) - .await - .unwrap(); + let header = mock_l1_batch_header(); + + insert_mock_l1_batch_header(&mut conn, &header).await; save_mock_eth_tx(AggregatedActionType::Commit, &mut conn).await; save_mock_eth_tx(AggregatedActionType::PublishProofOnchain, &mut conn).await; @@ -2485,6 +2537,7 @@ async fn loading_l1_batch_header() { let pool = ConnectionPool::<Core>::test_pool().await; let mut conn = pool.connection().await.unwrap(); + conn.protocol_versions_dal() .save_protocol_version_with_tx(&ProtocolVersion::default()) .await @@ -2492,8 +2545,30 @@ let header = mock_l1_batch_header(); + insert_mock_l1_batch_header(&mut conn, &header).await; + + let l2_block_header = create_l2_block_header(1); + conn.blocks_dal() - .insert_mock_l1_batch(&header) + .insert_l2_block(&l2_block_header) + .await + .unwrap(); + + conn.blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(L1BatchNumber(1)) + .await + .unwrap(); + + let first_location = IncludedTxLocation { + tx_hash: H256([1; 32]), + tx_index_in_l2_block: 0, + tx_initiator_address: Address::repeat_byte(2), + }; + let first_logs = [create_l2_to_l1_log(0, 0)]; + + let all_logs = vec![(first_location, first_logs.iter().collect())]; + conn.events_dal() + .save_user_l2_to_l1_logs(L2BlockNumber(1), &all_logs) .await .unwrap(); @@ -2503,6 +2578,7 @@ .await .unwrap() .unwrap(); + assert_eq!(loaded_header.number, header.number); assert_eq!(loaded_header.timestamp, header.timestamp); assert_eq!(loaded_header.l1_tx_count, header.l1_tx_count); diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 1c7f912728cc..2957701f9e23 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -21,7 +21,7 @@ use
crate::{ }, storage_transaction::CallTrace, }, - Core, + Core, CoreDal, }; #[derive(Debug)] @@ -271,6 +271,24 @@ impl BlocksWeb3Dal<'_, '_> { api::BlockId::Number(api::BlockNumber::Latest | api::BlockNumber::Committed) => ( "SELECT MAX(number) AS number FROM miniblocks"; ), + api::BlockId::Number(api::BlockNumber::L1Committed) => ( + " + SELECT COALESCE( + ( + SELECT MAX(number) FROM miniblocks + WHERE l1_batch_number = ( + SELECT number FROM l1_batches + JOIN eth_txs ON + l1_batches.eth_commit_tx_id = eth_txs.id + WHERE + eth_txs.confirmed_eth_tx_history_id IS NOT NULL + ORDER BY number DESC LIMIT 1 + ) + ), + 0 + ) AS number + "; + ), api::BlockId::Number(api::BlockNumber::Finalized) => ( " SELECT COALESCE( @@ -406,28 +424,10 @@ impl BlocksWeb3Dal<'_, '_> { &mut self, l1_batch_number: L1BatchNumber, ) -> DalResult<Vec<L2ToL1Log>> { - let raw_logs = sqlx::query!( - r#" - SELECT - l2_to_l1_logs - FROM - l1_batches - WHERE - number = $1 - "#, - i64::from(l1_batch_number.0) - ) - .instrument("get_l2_to_l1_logs") - .with_arg("l1_batch_number", &l1_batch_number) - .fetch_optional(self.storage) - .await? - .map(|row| row.l2_to_l1_logs) - .unwrap_or_default(); - - Ok(raw_logs - .into_iter() - .map(|bytes| L2ToL1Log::from_slice(&bytes)) - .collect()) + self.storage + .blocks_dal() + .get_l2_to_l1_logs_for_batch::<L2ToL1Log>(l1_batch_number) + .await } pub async fn get_l1_batch_number_of_l2_block( @@ -733,6 +733,7 @@ impl BlocksWeb3Dal<'_, '_> { #[cfg(test)] mod tests { use zksync_types::{ + aggregated_operations::AggregatedActionType, block::{L2BlockHasher, L2BlockHeader}, fee::TransactionExecutionMetrics, Address, L2BlockNumber, ProtocolVersion, ProtocolVersionId, }; use super::*; use crate::{ tests::{ - create_l2_block_header, create_snapshot_recovery, mock_execution_result, - mock_l2_transaction, + create_l1_batch_header, create_l2_block_header, create_snapshot_recovery, + mock_execution_result, mock_l2_transaction, }, ConnectionPool, Core, CoreDal, }; @@ -902,6 +903,79 @@ mod tests { assert_eq!(l2_block_number, Some(L2BlockNumber(43))); } + #[tokio::test] + async fn resolving_l1_committed_block_id() { + let connection_pool = ConnectionPool::<Core>::test_pool().await; + let mut conn = connection_pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let l2_block_header = create_l2_block_header(1); + conn.blocks_dal() + .insert_l2_block(&l2_block_header) + .await + .unwrap(); + + let l1_batch_header = create_l1_batch_header(0); + + conn.blocks_dal() + .insert_mock_l1_batch(&l1_batch_header) + .await + .unwrap(); + conn.blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(l1_batch_header.number) + .await + .unwrap(); + + let resolved_l2_block_number = conn + .blocks_web3_dal() + .resolve_block_id(api::BlockId::Number(api::BlockNumber::L1Committed)) + .await + .unwrap(); + assert_eq!(resolved_l2_block_number, Some(L2BlockNumber(0))); + + let mocked_commit_eth_tx = conn + .eth_sender_dal() + .save_eth_tx( + 0, + vec![], + AggregatedActionType::Commit, + Address::default(), + 0, + None, + None, + ) + .await + .unwrap(); + let tx_hash = H256::random(); + conn.eth_sender_dal() + .insert_tx_history(mocked_commit_eth_tx.id, 0, 0, None, tx_hash, &[], 0) + .await + .unwrap(); + conn.eth_sender_dal() + .confirm_tx(tx_hash, U256::zero()) + .await + .unwrap(); + conn.blocks_dal() + .set_eth_tx_id( + l1_batch_header.number..=l1_batch_header.number, + mocked_commit_eth_tx.id, + AggregatedActionType::Commit, + )
.await + .unwrap(); + + let resolved_l2_block_number = conn + .blocks_web3_dal() + .resolve_block_id(api::BlockId::Number(api::BlockNumber::L1Committed)) + .await + .unwrap(); + + assert_eq!(resolved_l2_block_number, Some(l2_block_header.number)); + } + #[tokio::test] async fn resolving_block_by_hash() { let connection_pool = ConnectionPool::<Core>::test_pool().await; diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index f2742cbedd8c..7655abbe230c 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -1,11 +1,10 @@ -use std::ops; - use anyhow::Context as _; -use zksync_consensus_roles::validator; -use zksync_consensus_storage::ReplicaState; +use bigdecimal::Zero; +use zksync_consensus_roles::{attester, validator}; +use zksync_consensus_storage::{BlockStoreState, ReplicaState}; use zksync_db_connection::{ connection::Connection, - error::{DalResult, SqlxContext}, + error::{DalError, DalResult, SqlxContext}, instrument::{InstrumentExt, Instrumented}, }; use zksync_types::L2BlockNumber; @@ -19,6 +18,19 @@ pub struct ConsensusDal<'a, 'c> { pub storage: &'a mut Connection<'c, Core>, } +/// Error returned by `ConsensusDal::insert_certificate()`. +#[derive(thiserror::Error, Debug)] +pub enum InsertCertificateError { + #[error("corresponding payload is missing")] + MissingPayload, + #[error("certificate doesn't match the payload")] + PayloadMismatch, + #[error(transparent)] + Dal(#[from] DalError), + #[error(transparent)] + Other(#[from] anyhow::Error), +} + impl ConsensusDal<'_, '_> { /// Fetches genesis. pub async fn genesis(&mut self) -> DalResult<Option<validator::Genesis>> { @@ -85,14 +97,16 @@ impl ConsensusDal<'_, '_> { DELETE FROM miniblocks_consensus "# ) - .execute(txn.conn()) + .instrument("try_update_genesis#DELETE FROM miniblocks_consensus") + .execute(&mut txn) .await?; sqlx::query!( r#" DELETE FROM consensus_replica_state "# ) - .execute(txn.conn()) + .instrument("try_update_genesis#DELETE FROM consensus_replica_state") + .execute(&mut txn) .await?; sqlx::query!( r#" @@ -104,32 +118,13 @@ impl ConsensusDal<'_, '_> { genesis, state, ) - .execute(txn.conn()) + .instrument("try_update_genesis#INSERT INTO consensus_replica_state") + .execute(&mut txn) .await?; txn.commit().await?; Ok(()) } - /// Fetches the range of L2 blocks present in storage. - /// If storage was recovered from snapshot, the range doesn't need to start at 0. - pub async fn block_range(&mut self) -> DalResult<ops::Range<validator::BlockNumber>> { - let mut txn = self.storage.start_transaction().await?; - let snapshot = txn - .snapshot_recovery_dal() - .get_applied_snapshot_status() - .await?; - // `snapshot.l2_block_number` indicates the last block processed. - // This block is NOT present in storage. Therefore, the first block - // that will appear in storage is `snapshot.l2_block_number + 1`. - let start = validator::BlockNumber(snapshot.map_or(0, |s| s.l2_block_number.0 + 1).into()); - let end = txn - .blocks_dal() - .get_sealed_l2_block_number() - .await? - .map_or(start, |last| validator::BlockNumber(last.0.into()).next()); - Ok(start..end) - } - /// [Main node only] creates a new consensus fork starting at /// the last sealed L2 block. Resets the state of the consensus /// by calling `try_update_genesis()`. @@ -142,19 +137,18 @@ impl ConsensusDal<'_, '_> { let Some(old) = txn.consensus_dal().genesis().await.context("genesis()")? else { return Ok(()); }; - let first_block = txn - .consensus_dal() - .block_range() - .await - .context("get_block_range()")?
- .end; let new = validator::GenesisRaw { chain_id: old.chain_id, fork_number: old.fork_number.next(), - first_block, + first_block: txn + .consensus_dal() + .next_block() + .await + .context("next_block()")?, protocol_version: old.protocol_version, - committee: old.committee.clone(), + validators: old.validators.clone(), + attesters: old.attesters.clone(), leader_selection: old.leader_selection.clone(), } .with_hash(); @@ -196,68 +190,90 @@ impl ConsensusDal<'_, '_> { state_json ) .instrument("set_replica_state") + .report_latency() .with_arg("state.view", &state.view) .execute(self.storage) .await?; Ok(()) } - /// Fetches the first consensus certificate. - /// It might NOT be the certificate for the first L2 block: - /// see `validator::Genesis.first_block`. - pub async fn first_certificate(&mut self) -> DalResult<Option<validator::CommitQC>> { - sqlx::query!( - r#" - SELECT - certificate - FROM - miniblocks_consensus - ORDER BY - number ASC - LIMIT - 1 - "# - ) - .try_map(|row| { - zksync_protobuf::serde::deserialize(row.certificate).decode_column("certificate") + /// First block that should be in storage. + async fn first_block(&mut self) -> anyhow::Result<validator::BlockNumber> { + let info = self + .storage + .pruning_dal() + .get_pruning_info() + .await + .context("get_pruning_info()")?; + Ok(match info.last_soft_pruned_l2_block { + // It is guaranteed that pruning info values are set for storage recovered from + // snapshot, even if pruning was not enabled. + Some(last_pruned) => validator::BlockNumber(last_pruned.0.into()) + 1, + // No snapshot and no pruning: + None => validator::BlockNumber(0), }) - .instrument("first_certificate") - .fetch_optional(self.storage) - .await + } + + /// Next block that should be inserted to storage. + pub async fn next_block(&mut self) -> anyhow::Result<validator::BlockNumber> { + if let Some(last) = self + .storage + .blocks_dal() + .get_sealed_l2_block_number() + .await + .context("get_sealed_l2_block_number()")? + { + return Ok(validator::BlockNumber(last.0.into()) + 1); + } + let next = self + .storage + .consensus_dal() + .first_block() + .await + .context("first_block()")?; + Ok(next) } /// Fetches the last consensus certificate. /// Currently, certificates are NOT generated synchronously with L2 blocks, /// so it might NOT be the certificate for the last L2 block. - pub async fn last_certificate(&mut self) -> DalResult<Option<validator::CommitQC>> { - sqlx::query!( + pub async fn block_certificates_range(&mut self) -> anyhow::Result<BlockStoreState> { + // It cannot be older than genesis first block. + let mut start = self.genesis().await?.context("genesis()")?.first_block; + start = start.max(self.first_block().await.context("first_block()")?); + let row = sqlx::query!( r#" SELECT certificate FROM miniblocks_consensus + WHERE + number >= $1 ORDER BY number DESC LIMIT 1 - "# + "#, + i64::try_from(start.0)?, ) - .try_map(|row| { - zksync_protobuf::serde::deserialize(row.certificate).decode_column("certificate") - }) - .instrument("last_certificate") + .instrument("block_certificate_range") + .report_latency() .fetch_optional(self.storage) - .await + .await?; + Ok(BlockStoreState { + first: start, + last: row + .map(|row| zksync_protobuf::serde::deserialize(row.certificate)) + .transpose()?, + }) } /// Fetches the consensus certificate for the L2 block with the given `block_number`.
- pub async fn certificate( + pub async fn block_certificate( &mut self, block_number: validator::BlockNumber, - ) -> DalResult<Option<validator::CommitQC>> { - let instrumentation = - Instrumented::new("certificate").with_arg("block_number", &block_number); - let query = sqlx::query!( + ) -> anyhow::Result<Option<validator::CommitQC>> { + let Some(row) = sqlx::query!( r#" SELECT certificate @@ -266,17 +282,42 @@ impl ConsensusDal<'_, '_> { WHERE number = $1 "#, - i64::try_from(block_number.0) - .map_err(|err| { instrumentation.arg_error("block_number", err) })? + i64::try_from(block_number.0)? ) - .try_map(|row| { - zksync_protobuf::serde::deserialize(row.certificate).decode_column("certificate") - }); + .instrument("block_certificate") + .report_latency() + .fetch_optional(self.storage) + .await? + else { + return Ok(None); + }; + Ok(Some(zksync_protobuf::serde::deserialize(row.certificate)?)) - instrumentation - .with(query) - .fetch_optional(self.storage) - .await + /// Fetches the attester certificate for the L1 batch with the given `batch_number`. + pub async fn batch_certificate( + &mut self, + batch_number: attester::BatchNumber, + ) -> anyhow::Result<Option<attester::BatchQC>> { + let Some(row) = sqlx::query!( + r#" + SELECT + certificate + FROM + l1_batches_consensus + WHERE + l1_batch_number = $1 + "#, + i64::try_from(batch_number.0)? + ) + .instrument("batch_certificate") + .report_latency() + .fetch_optional(self.storage) + .await? + else { + return Ok(None); + }; + Ok(Some(zksync_protobuf::serde::deserialize(row.certificate)?)) } /// Fetches a range of L2 blocks from storage and converts them to `Payload`s. @@ -329,34 +370,23 @@ impl ConsensusDal<'_, '_> { .next()) } - /// Inserts a certificate for the L2 block `cert.header().number`. It verifies that - /// - /// - the certified payload matches the L2 block in storage - /// - the `cert.header().parent` matches the parent L2 block. - /// - the parent block already has a certificate. - /// - /// NOTE: This is an extra secure way of storing a certificate, - /// which will help us to detect bugs in the consensus implementation - /// while it is "fresh". If it turns out to take too long, - /// we can remove the verification checks later. - pub async fn insert_certificate(&mut self, cert: &validator::CommitQC) -> anyhow::Result<()> { + /// Inserts a certificate for the L2 block `cert.header().number`. + /// Fails if certificate doesn't match the stored block. + pub async fn insert_block_certificate( + &mut self, + cert: &validator::CommitQC, + ) -> Result<(), InsertCertificateError> { + use InsertCertificateError as E; let header = &cert.message.proposal; let mut txn = self.storage.start_transaction().await?; - if let Some(last) = txn.consensus_dal().last_certificate().await? { - anyhow::ensure!( - last.header().number.next() == header.number, - "expected certificate for a block after the current head block" - ); - } let want_payload = txn .consensus_dal() .block_payload(cert.message.proposal.number) .await?
- .context("corresponding L2 block is missing")?; - anyhow::ensure!( - header.payload == want_payload.encode().hash(), - "consensus block payload doesn't match the L2 block" - ); + .ok_or(E::MissingPayload)?; + if header.payload != want_payload.encode().hash() { + return Err(E::PayloadMismatch); + } sqlx::query!( r#" INSERT INTO @@ -367,20 +397,83 @@ impl ConsensusDal<'_, '_> { header.number.0 as i64, zksync_protobuf::serde::serialize(cert, serde_json::value::Serializer).unwrap(), ) - .execute(txn.conn()) + .instrument("insert_block_certificate") + .report_latency() + .execute(&mut txn) .await?; - txn.commit().await?; + txn.commit().await.context("commit")?; Ok(()) } + + /// Inserts a certificate for the L1 batch. + /// + /// Insertion is allowed even if it creates gaps in the L1 batch history. + /// + /// This method assumes that all payload validation has been carried out by the caller. + pub async fn insert_batch_certificate( + &mut self, + cert: &attester::BatchQC, + ) -> Result<(), InsertCertificateError> { + let l1_batch_number = cert.message.number.0 as i64; + + let res = sqlx::query!( + r#" + INSERT INTO + l1_batches_consensus (l1_batch_number, certificate, created_at, updated_at) + VALUES + ($1, $2, NOW(), NOW()) + ON CONFLICT (l1_batch_number) DO NOTHING + "#, + l1_batch_number, + zksync_protobuf::serde::serialize(cert, serde_json::value::Serializer).unwrap(), + ) + .instrument("insert_batch_certificate") + .report_latency() + .execute(self.storage) + .await?; + + if res.rows_affected().is_zero() { + tracing::debug!(l1_batch_number, "duplicate batch certificate"); + } + + Ok(()) + } + + /// Gets a number of the last L1 batch that was inserted. It might have gaps before it, + /// depending on the order in which votes have been collected over gossip by consensus. 
+ pub async fn get_last_batch_certificate_number( + &mut self, + ) -> DalResult<Option<attester::BatchNumber>> { + let row = sqlx::query!( + r#" + SELECT + MAX(l1_batch_number) AS "number" + FROM + l1_batches_consensus + "# + ) + .instrument("get_last_batch_certificate_number") + .report_latency() + .fetch_one(self.storage) + .await?; + + Ok(row + .number + .map(|number| attester::BatchNumber(number as u64))) + } } #[cfg(test)] mod tests { use rand::Rng as _; - use zksync_consensus_roles::validator; + use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::ReplicaState; + use zksync_types::{L1BatchNumber, ProtocolVersion}; - use crate::{ConnectionPool, Core, CoreDal}; + use crate::{ + tests::{create_l1_batch_header, create_l2_block_header}, + ConnectionPool, Core, CoreDal, + }; #[tokio::test] async fn replica_state_read_write() { @@ -416,4 +509,96 @@ } } } + + #[tokio::test] + async fn test_batch_certificate() { + let rng = &mut rand::thread_rng(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + let mut mock_batch_qc = |number: L1BatchNumber| { + let mut cert: attester::BatchQC = rng.gen(); + cert.message.number.0 = u64::from(number.0); + cert.signatures.add(rng.gen(), rng.gen()); + cert + }; + + // Required for inserting l2 blocks + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + // Insert some mock L2 blocks and L1 batches + let mut block_number = 0; + let mut batch_number = 0; + let num_batches = 3; + for _ in 0..num_batches { + for _ in 0..3 { + block_number += 1; + let l2_block = create_l2_block_header(block_number); + conn.blocks_dal().insert_l2_block(&l2_block).await.unwrap(); + } + batch_number += 1; + let l1_batch = create_l1_batch_header(batch_number); + + conn.blocks_dal() + .insert_mock_l1_batch(&l1_batch) + .await + .unwrap(); + + conn.blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) + .await + .unwrap(); + } + + let l1_batch_number = L1BatchNumber(batch_number); + + // Insert a batch certificate for the last L1 batch. + let cert1 = mock_batch_qc(l1_batch_number); + + conn.consensus_dal() + .insert_batch_certificate(&cert1) + .await + .unwrap(); + + // Try insert duplicate batch certificate for the same batch. + let cert2 = mock_batch_qc(l1_batch_number); + + conn.consensus_dal() + .insert_batch_certificate(&cert2) + .await + .unwrap(); + + // Retrieve the latest certificate. + let number = conn + .consensus_dal() + .get_last_batch_certificate_number() + .await + .unwrap() + .unwrap(); + + let cert = conn + .consensus_dal() + .batch_certificate(number) + .await + .unwrap() + .unwrap(); + + assert_eq!(cert, cert1, "duplicates are ignored"); + + // Try insert batch certificate for non-existing batch + let cert3 = mock_batch_qc(l1_batch_number.next()); + conn.consensus_dal() + .insert_batch_certificate(&cert3) + .await + .expect_err("missing payload"); + + // Insert one more L1 batch without a certificate.
+ conn.blocks_dal() + .insert_mock_l1_batch(&create_l1_batch_header(batch_number + 1)) + .await + .unwrap(); + } } diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs new file mode 100644 index 000000000000..24048ec4fa19 --- /dev/null +++ b/core/lib/dal/src/data_availability_dal.rs @@ -0,0 +1,217 @@ +use zksync_db_connection::{ + connection::Connection, + error::DalResult, + instrument::{InstrumentExt, Instrumented}, +}; +use zksync_types::{pubdata_da::DataAvailabilityBlob, L1BatchNumber}; + +use crate::{ + models::storage_data_availability::{L1BatchDA, StorageDABlob}, + Core, +}; + +#[derive(Debug)] +pub struct DataAvailabilityDal<'a, 'c> { + pub(crate) storage: &'a mut Connection<'c, Core>, +} + +impl DataAvailabilityDal<'_, '_> { + /// Inserts the blob_id for the given L1 batch. If the blob_id is already present, + /// verifies that it matches the one provided in the function arguments + /// (preventing the same L1 batch from being stored twice) + pub async fn insert_l1_batch_da( + &mut self, + number: L1BatchNumber, + blob_id: &str, + sent_at: chrono::NaiveDateTime, + ) -> DalResult<()> { + let update_result = sqlx::query!( + r#" + INSERT INTO + data_availability (l1_batch_number, blob_id, sent_at, created_at, updated_at) + VALUES + ($1, $2, $3, NOW(), NOW()) + ON CONFLICT DO NOTHING + "#, + i64::from(number.0), + blob_id, + sent_at, + ) + .instrument("insert_l1_batch_da") + .with_arg("number", &number) + .with_arg("blob_id", &blob_id) + .report_latency() + .execute(self.storage) + .await?; + + if update_result.rows_affected() == 0 { + tracing::debug!( + "L1 batch #{number}: DA blob_id wasn't updated as it's already present" + ); + + let instrumentation = + Instrumented::new("get_matching_batch_da_blob_id").with_arg("number", &number); + + // Batch was already processed. Verify that existing DA blob_id matches + let query = sqlx::query!( + r#" + SELECT + blob_id + FROM + data_availability + WHERE + l1_batch_number = $1 + "#, + i64::from(number.0), + ); + + let matched: String = instrumentation + .clone() + .with(query) + .report_latency() + .fetch_one(self.storage) + .await? + .blob_id; + + if matched != *blob_id.to_string() { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "Error storing DA blob id. DA blob_id {blob_id} for L1 batch #{number} does not match the expected value" + )); + return Err(err); + } + } + Ok(()) + } + + /// Saves the inclusion data for the given L1 batch. If the inclusion data is already present, + /// verifies that it matches the one provided in the function arguments + /// (meaning that the inclusion data corresponds to the same DA blob) + pub async fn save_l1_batch_inclusion_data( + &mut self, + number: L1BatchNumber, + da_inclusion_data: &[u8], + ) -> DalResult<()> { + let update_result = sqlx::query!( + r#" + UPDATE data_availability + SET + inclusion_data = $1, + updated_at = NOW() + WHERE + l1_batch_number = $2 + AND inclusion_data IS NULL + "#, + da_inclusion_data, + i64::from(number.0), + ) + .instrument("save_l1_batch_da_data") + .with_arg("number", &number) + .report_latency() + .execute(self.storage) + .await?; + + if update_result.rows_affected() == 0 { + tracing::debug!("L1 batch #{number}: DA data wasn't updated as it's already present"); + + let instrumentation = + Instrumented::new("get_matching_batch_da_data").with_arg("number", &number); + + // Batch was already processed. 
Verify that existing DA data matches + let query = sqlx::query!( + r#" + SELECT + inclusion_data + FROM + data_availability + WHERE + l1_batch_number = $1 + "#, + i64::from(number.0), + ); + + let matched: Option> = instrumentation + .clone() + .with(query) + .report_latency() + .fetch_one(self.storage) + .await? + .inclusion_data; + + if matched.unwrap_or_default() != da_inclusion_data.to_vec() { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "Error storing DA inclusion data. DA data for L1 batch #{number} does not match the one provided before" + )); + return Err(err); + } + } + Ok(()) + } + + /// Assumes that the L1 batches are sorted by number, and returns the first one that is ready for DA dispatch. + pub async fn get_first_da_blob_awaiting_inclusion( + &mut self, + ) -> DalResult> { + Ok(sqlx::query_as!( + StorageDABlob, + r#" + SELECT + l1_batch_number, + blob_id, + inclusion_data, + sent_at + FROM + data_availability + WHERE + inclusion_data IS NULL + ORDER BY + l1_batch_number + LIMIT + 1 + "#, + ) + .instrument("get_first_da_blob_awaiting_inclusion") + .fetch_optional(self.storage) + .await? + .map(DataAvailabilityBlob::from)) + } + + /// Fetches the pubdata and `l1_batch_number` for the L1 batches that are ready for DA dispatch. + pub async fn get_ready_for_da_dispatch_l1_batches( + &mut self, + limit: usize, + ) -> DalResult> { + let rows = sqlx::query!( + r#" + SELECT + number, + pubdata_input + FROM + l1_batches + LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number + WHERE + eth_commit_tx_id IS NULL + AND number != 0 + AND data_availability.blob_id IS NULL + AND pubdata_input IS NOT NULL + ORDER BY + number + LIMIT + $1 + "#, + limit as i64, + ) + .instrument("get_ready_for_da_dispatch_l1_batches") + .with_arg("limit", &limit) + .fetch_all(self.storage) + .await?; + + Ok(rows + .into_iter() + .map(|row| L1BatchDA { + // `unwrap` is safe here because we have a `WHERE` clause that filters out `NULL` values + pubdata: row.pubdata_input.unwrap(), + l1_batch_number: L1BatchNumber(row.number as u32), + }) + .collect()) + } +} diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index d32ed082131e..d45d8470b379 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -22,7 +22,10 @@ pub struct EthSenderDal<'a, 'c> { } impl EthSenderDal<'_, '_> { - pub async fn get_inflight_txs(&mut self) -> sqlx::Result> { + pub async fn get_inflight_txs( + &mut self, + operator_address: Option
, + ) -> sqlx::Result> { let txs = sqlx::query_as!( StorageEthTx, r#" @@ -31,18 +34,22 @@ impl EthSenderDal<'_, '_> { FROM eth_txs WHERE - confirmed_eth_tx_history_id IS NULL + from_addr IS NOT DISTINCT FROM $1 -- can't just use equality as NULL != NULL + AND confirmed_eth_tx_history_id IS NULL AND id <= ( SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history + JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id WHERE - sent_at_block IS NOT NULL + eth_txs_history.sent_at_block IS NOT NULL + AND eth_txs.from_addr IS NOT DISTINCT FROM $1 ) ORDER BY id - "# + "#, + operator_address.as_ref().map(|h160| h160.as_bytes()), ) .fetch_all(self.storage.conn()) .await?; @@ -121,7 +128,11 @@ impl EthSenderDal<'_, '_> { .map(Into::into)) } - pub async fn get_new_eth_txs(&mut self, limit: u64) -> sqlx::Result> { + pub async fn get_new_eth_txs( + &mut self, + limit: u64, + operator_address: &Option
- pub async fn get_new_eth_txs(&mut self, limit: u64) -> sqlx::Result<Vec<EthTx>> { + pub async fn get_new_eth_txs( + &mut self, + limit: u64, + operator_address: &Option<Address>, + ) -> sqlx::Result<Vec<EthTx>> { let txs = sqlx::query_as!( StorageEthTx, r#" @@ -130,18 +141,24 @@ impl EthSenderDal<'_, '_> { FROM eth_txs WHERE - id > ( + from_addr IS NOT DISTINCT FROM $2 -- can't just use equality as NULL != NULL + AND id > ( SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history + JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id + WHERE + eth_txs_history.sent_at_block IS NOT NULL + AND eth_txs.from_addr IS NOT DISTINCT FROM $2 ) ORDER BY id LIMIT $1 "#, - limit as i64 + limit as i64, + operator_address.as_ref().map(|h160| h160.as_bytes()), ) .fetch_all(self.storage.conn()) .await?; diff --git a/core/lib/dal/src/events_dal.rs index ebe159577bb2..c2b296fc085b 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -222,7 +222,8 @@ impl EventsDal<'_, '_> { tx_hash, tx_index_in_block, event_index_in_block, - event_index_in_tx + event_index_in_tx, + NULL::BIGINT AS "block_timestamp?" FROM events WHERE @@ -307,7 +308,6 @@ impl EventsDal<'_, '_> { log_index_in_miniblock, log_index_in_tx, tx_hash, - NULL::bytea AS "block_hash", NULL::BIGINT AS "l1_batch_number?", shard_id, is_service, @@ -416,7 +416,10 @@ mod tests { use zksync_types::{Address, L1BatchNumber, ProtocolVersion}; use super::*; - use crate::{tests::create_l2_block_header, ConnectionPool, Core}; + use crate::{ + tests::{create_l2_block_header, create_l2_to_l1_log}, + ConnectionPool, Core, + }; fn create_vm_event(index: u8, topic_count: u8) -> VmEvent { assert!(topic_count <= 4); @@ -498,17 +501,6 @@ } } - fn create_l2_to_l1_log(tx_number_in_block: u16, index: u8) -> UserL2ToL1Log { - UserL2ToL1Log(L2ToL1Log { - shard_id: 0, - is_service: false, - tx_number_in_block, - sender: Address::repeat_byte(index), - key: H256::from_low_u64_be(u64::from(index)), - value: H256::repeat_byte(index), - }) - } - #[tokio::test] async fn storing_l2_to_l1_logs() { let pool = ConnectionPool::<Core>::test_pool().await; diff --git a/core/lib/dal/src/events_web3_dal.rs index 1a182f6052d5..fc21cc36460c 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -79,7 +79,8 @@ impl EventsWeb3Dal<'_, '_> { ORDER BY miniblock_number ASC, event_index_in_block ASC LIMIT ${} ) - SELECT miniblocks.hash as "block_hash", miniblocks.l1_batch_number as "l1_batch_number", events_select.* + SELECT miniblocks.hash as "block_hash", miniblocks.l1_batch_number as "l1_batch_number", + miniblocks.timestamp as block_timestamp, events_select.* FROM events_select INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number ORDER BY miniblock_number ASC, event_index_in_block ASC @@ -222,7 +223,8 @@ impl EventsWeb3Dal<'_, '_> { tx_hash AS "tx_hash!", tx_index_in_block AS "tx_index_in_block!", event_index_in_block AS "event_index_in_block!", - event_index_in_tx AS "event_index_in_tx!"
+ event_index_in_tx AS "event_index_in_tx!", + miniblocks.timestamp AS "block_timestamp" FROM events_select INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number diff --git a/core/lib/dal/src/helpers.rs b/core/lib/dal/src/helpers.rs index e8e11f1cc5f3..65e9161bd047 100644 --- a/core/lib/dal/src/helpers.rs +++ b/core/lib/dal/src/helpers.rs @@ -40,11 +40,10 @@ pub async fn wait_for_l1_batch( #[cfg(test)] mod tests { - use zksync_contracts::BaseSystemContractsHashes; - use zksync_types::{block::L1BatchHeader, ProtocolVersion, ProtocolVersionId, H256}; + use zksync_types::ProtocolVersion; use super::*; - use crate::{ConnectionPool, Core, CoreDal}; + use crate::{tests::create_l1_batch_header, ConnectionPool, Core, CoreDal}; #[tokio::test] async fn waiting_for_l1_batch_success() { @@ -59,15 +58,7 @@ mod tests { .save_protocol_version_with_tx(&ProtocolVersion::default()) .await .unwrap(); - let header = L1BatchHeader::new( - L1BatchNumber(0), - 100, - BaseSystemContractsHashes { - bootloader: H256::repeat_byte(1), - default_aa: H256::repeat_byte(42), - }, - ProtocolVersionId::latest(), - ); + let header = create_l1_batch_header(0); conn.blocks_dal() .insert_mock_l1_batch(&header) .await diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 0a2ed3bdd641..0e1badb9af76 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -6,14 +6,15 @@ pub use sqlx::{types::BigDecimal, Error as SqlxError}; use zksync_db_connection::connection::DbMarker; pub use zksync_db_connection::{ - connection::Connection, + connection::{Connection, IsolationLevel}, connection_pool::{ConnectionPool, ConnectionPoolBuilder}, error::{DalError, DalResult}, }; use crate::{ - blocks_dal::BlocksDal, blocks_web3_dal::BlocksWeb3Dal, consensus_dal::ConsensusDal, - contract_verification_dal::ContractVerificationDal, eth_sender_dal::EthSenderDal, + base_token_dal::BaseTokenDal, blocks_dal::BlocksDal, blocks_web3_dal::BlocksWeb3Dal, + consensus_dal::ConsensusDal, contract_verification_dal::ContractVerificationDal, + data_availability_dal::DataAvailabilityDal, eth_sender_dal::EthSenderDal, events_dal::EventsDal, events_web3_dal::EventsWeb3Dal, factory_deps_dal::FactoryDepsDal, proof_generation_dal::ProofGenerationDal, protocol_versions_dal::ProtocolVersionsDal, protocol_versions_web3_dal::ProtocolVersionsWeb3Dal, pruning_dal::PruningDal, @@ -26,11 +27,13 @@ use crate::{ transactions_web3_dal::TransactionsWeb3Dal, vm_runner_dal::VmRunnerDal, }; +pub mod base_token_dal; pub mod blocks_dal; pub mod blocks_web3_dal; pub mod consensus; pub mod consensus_dal; pub mod contract_verification_dal; +mod data_availability_dal; pub mod eth_sender_dal; pub mod events_dal; pub mod events_web3_dal; @@ -124,7 +127,11 @@ where fn pruning_dal(&mut self) -> PruningDal<'_, 'a>; + fn data_availability_dal(&mut self) -> DataAvailabilityDal<'_, 'a>; + fn vm_runner_dal(&mut self) -> VmRunnerDal<'_, 'a>; + + fn base_token_dal(&mut self) -> BaseTokenDal<'_, 'a>; } #[derive(Clone, Debug)] @@ -240,7 +247,15 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { PruningDal { storage: self } } + fn data_availability_dal(&mut self) -> DataAvailabilityDal<'_, 'a> { + DataAvailabilityDal { storage: self } + } + fn vm_runner_dal(&mut self) -> VmRunnerDal<'_, 'a> { VmRunnerDal { storage: self } } + + fn base_token_dal(&mut self) -> BaseTokenDal<'_, 'a> { + BaseTokenDal { storage: self } + } } diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index bc0e2c657da5..1e852e3f6364 100644 --- 
a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -3,6 +3,8 @@ use anyhow::Context as _; use zksync_db_connection::error::SqlxContext; use zksync_types::{ProtocolVersionId, H160, H256}; +pub mod storage_base_token_ratio; +pub(crate) mod storage_data_availability; pub mod storage_eth_tx; pub mod storage_event; pub mod storage_log; diff --git a/core/lib/dal/src/models/storage_base_token_ratio.rs new file mode 100644 index 000000000000..f486aefd4085 --- /dev/null +++ b/core/lib/dal/src/models/storage_base_token_ratio.rs @@ -0,0 +1,31 @@ +use std::num::NonZeroU64; + +use bigdecimal::{BigDecimal, ToPrimitive}; +use chrono::NaiveDateTime; +use zksync_types::base_token_ratio::BaseTokenRatio; + +/// Represents a row in the `base_token_ratios` table. +#[derive(Debug, Clone)] +pub struct StorageBaseTokenRatio { + pub id: i64, + pub created_at: NaiveDateTime, + pub updated_at: NaiveDateTime, + pub ratio_timestamp: NaiveDateTime, + pub numerator: BigDecimal, + pub denominator: BigDecimal, + pub used_in_l1: bool, +} + +impl From<StorageBaseTokenRatio> for BaseTokenRatio { + fn from(row: StorageBaseTokenRatio) -> BaseTokenRatio { + BaseTokenRatio { + id: row.id as u32, + ratio_timestamp: row.ratio_timestamp.and_utc(), + numerator: NonZeroU64::new(row.numerator.to_u64().expect("numerator is not u64")) + .unwrap(), + denominator: NonZeroU64::new(row.denominator.to_u64().expect("denominator is not u64")) + .unwrap(), + used_in_l1: row.used_in_l1, + } + } +}
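Note that this conversion panics (via `expect`/`unwrap`) if a stored numerator or denominator is zero or does not fit into `u64`, so writers must enforce those invariants at insert time. For illustration, a well-formed in-memory value would look roughly like this (field values are made up; `BaseTokenRatio` itself lives in `zksync_types`):

    use std::num::NonZeroU64;

    let ratio = BaseTokenRatio {
        id: 1,
        ratio_timestamp: chrono::Utc::now(),
        numerator: NonZeroU64::new(1_000).unwrap(),   // base-token units...
        denominator: NonZeroU64::new(1).unwrap(),     // ...per unit of ETH
        used_in_l1: false,
    };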
diff --git a/core/lib/dal/src/models/storage_block.rs index 95780e667784..be8b4e4152b5 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -38,7 +38,6 @@ pub(crate) struct StorageL1BatchHeader { pub timestamp: i64, pub l1_tx_count: i32, pub l2_tx_count: i32, - pub l2_to_l1_logs: Vec<Vec<u8>>, pub l2_to_l1_messages: Vec<Vec<u8>>, pub bloom: Vec<u8>, pub priority_ops_onchain_data: Vec<Vec<u8>>, @@ -55,38 +54,40 @@ pub(crate) struct StorageL1BatchHeader { pub pubdata_input: Option<Vec<u8>>, } -impl From<StorageL1BatchHeader> for L1BatchHeader { - fn from(l1_batch: StorageL1BatchHeader) -> Self { - let priority_ops_onchain_data: Vec<_> = l1_batch +impl StorageL1BatchHeader { + pub fn into_l1_batch_header_with_logs( + self, + l2_to_l1_logs: Vec<UserL2ToL1Log>, + ) -> L1BatchHeader { + let priority_ops_onchain_data: Vec<_> = self .priority_ops_onchain_data .into_iter() .map(|raw_data| raw_data.into()) .collect(); - let system_logs = convert_l2_to_l1_logs(l1_batch.system_logs); - let user_l2_to_l1_logs = convert_l2_to_l1_logs(l1_batch.l2_to_l1_logs); + let system_logs = convert_l2_to_l1_logs(self.system_logs); L1BatchHeader { - number: L1BatchNumber(l1_batch.number as u32), - timestamp: l1_batch.timestamp as u64, + number: L1BatchNumber(self.number as u32), + timestamp: self.timestamp as u64, priority_ops_onchain_data, - l1_tx_count: l1_batch.l1_tx_count as u16, - l2_tx_count: l1_batch.l2_tx_count as u16, - l2_to_l1_logs: user_l2_to_l1_logs.into_iter().map(UserL2ToL1Log).collect(), - l2_to_l1_messages: l1_batch.l2_to_l1_messages, + l1_tx_count: self.l1_tx_count as u16, + l2_tx_count: self.l2_tx_count as u16, + l2_to_l1_logs, + l2_to_l1_messages: self.l2_to_l1_messages, - bloom: H2048::from_slice(&l1_batch.bloom), - used_contract_hashes: serde_json::from_value(l1_batch.used_contract_hashes) + bloom: H2048::from_slice(&self.bloom), + used_contract_hashes: serde_json::from_value(self.used_contract_hashes) .expect("invalid value for used_contract_hashes in the DB"), base_system_contracts_hashes: convert_base_system_contracts_hashes( - l1_batch.bootloader_code_hash, - l1_batch.default_aa_code_hash, + self.bootloader_code_hash, + self.default_aa_code_hash, ), system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), - protocol_version: l1_batch + protocol_version: self .protocol_version .map(|v| (v as u16).try_into().unwrap()), - pubdata_input: l1_batch.pubdata_input, + pubdata_input: self.pubdata_input, } } } @@ -121,7 +122,6 @@ pub(crate) struct StorageL1Batch { pub l1_tx_count: i32, pub l2_tx_count: i32, pub bloom: Vec<u8>, - pub l2_to_l1_logs: Vec<Vec<u8>>, pub priority_ops_onchain_data: Vec<Vec<u8>>, pub hash: Option<Vec<u8>>, @@ -149,38 +149,40 @@ pub(crate) struct StorageL1Batch { pub pubdata_input: Option<Vec<u8>>, } -impl From<StorageL1Batch> for L1BatchHeader { - fn from(l1_batch: StorageL1Batch) -> Self { - let priority_ops_onchain_data: Vec<_> = l1_batch +impl StorageL1Batch { + pub fn into_l1_batch_header_with_logs( + self, + l2_to_l1_logs: Vec<UserL2ToL1Log>, + ) -> L1BatchHeader { + let priority_ops_onchain_data: Vec<_> = self .priority_ops_onchain_data .into_iter() .map(Vec::into) .collect(); - let system_logs = convert_l2_to_l1_logs(l1_batch.system_logs); - let user_l2_to_l1_logs = convert_l2_to_l1_logs(l1_batch.l2_to_l1_logs); + let system_logs = convert_l2_to_l1_logs(self.system_logs); L1BatchHeader { - number: L1BatchNumber(l1_batch.number as u32), - timestamp: l1_batch.timestamp as u64, + number: L1BatchNumber(self.number as u32), + timestamp: self.timestamp as u64, priority_ops_onchain_data, - l1_tx_count: l1_batch.l1_tx_count as u16, - l2_tx_count: l1_batch.l2_tx_count as u16, - l2_to_l1_logs: user_l2_to_l1_logs.into_iter().map(UserL2ToL1Log).collect(), - l2_to_l1_messages: l1_batch.l2_to_l1_messages, + l1_tx_count: self.l1_tx_count as u16, + l2_tx_count: self.l2_tx_count as u16, + l2_to_l1_logs, + l2_to_l1_messages: self.l2_to_l1_messages, - bloom: H2048::from_slice(&l1_batch.bloom), - used_contract_hashes: serde_json::from_value(l1_batch.used_contract_hashes) + bloom: H2048::from_slice(&self.bloom), + used_contract_hashes: serde_json::from_value(self.used_contract_hashes) .expect("invalid value for used_contract_hashes in the DB"), base_system_contracts_hashes: convert_base_system_contracts_hashes( - l1_batch.bootloader_code_hash, - l1_batch.default_aa_code_hash, + self.bootloader_code_hash, + self.default_aa_code_hash, ), system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), - protocol_version: l1_batch + protocol_version: self .protocol_version .map(|v| (v as u16).try_into().unwrap()), - pubdata_input: l1_batch.pubdata_input, + pubdata_input: self.pubdata_input, } } }
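With this refactor the storage structs no longer carry user L2-to-L1 logs; callers are expected to load the logs separately and inject them. A sketch of the new call shape (assumes `user_logs` was fetched by a separate query; not a verbatim call site):

    // `storage_batch: StorageL1Batch` comes from a `query_as!` row;
    // `user_logs: Vec<UserL2ToL1Log>` is loaded independently.
    let header = storage_batch.into_l1_batch_header_with_logs(user_logs);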
diff --git a/core/lib/dal/src/models/storage_data_availability.rs new file mode 100644 index 000000000000..2a1b39845e69 --- /dev/null +++ b/core/lib/dal/src/models/storage_data_availability.rs @@ -0,0 +1,29 @@ +use chrono::NaiveDateTime; +use zksync_types::{pubdata_da::DataAvailabilityBlob, L1BatchNumber}; + +/// Represents a blob in the data availability layer. +#[derive(Debug, Clone)] +pub(crate) struct StorageDABlob { + pub l1_batch_number: i64, + pub blob_id: String, + pub inclusion_data: Option<Vec<u8>>, + pub sent_at: NaiveDateTime, +} + +impl From<StorageDABlob> for DataAvailabilityBlob { + fn from(blob: StorageDABlob) -> DataAvailabilityBlob { + DataAvailabilityBlob { + l1_batch_number: L1BatchNumber(blob.l1_batch_number as u32), + blob_id: blob.blob_id, + inclusion_data: blob.inclusion_data, + sent_at: blob.sent_at.and_utc(), + } + } +} + +/// A small struct used to store a batch and its data availability, which are retrieved from the database. +#[derive(Debug)] +pub struct L1BatchDA { + pub pubdata: Vec<u8>, + pub l1_batch_number: L1BatchNumber, +} diff --git a/core/lib/dal/src/models/storage_eth_tx.rs index 615b365d8533..2654ffe0e0a7 100644 --- a/core/lib/dal/src/models/storage_eth_tx.rs +++ b/core/lib/dal/src/models/storage_eth_tx.rs @@ -77,7 +77,7 @@ impl From<StorageEthTx> for EthTx { .expect("Incorrect address in db"), raw_tx: tx.raw_tx.clone(), tx_type: AggregatedActionType::from_str(&tx.tx_type).expect("Wrong agg type"), - created_at_timestamp: tx.created_at.timestamp() as u64, + created_at_timestamp: tx.created_at.and_utc().timestamp() as u64, predicted_gas_cost: tx.predicted_gas_cost as u64, from_addr: tx.from_addr.map(|f| Address::from_slice(&f)), blob_sidecar: tx.blob_sidecar.map(|b| { diff --git a/core/lib/dal/src/models/storage_event.rs index 98e53ae374ef..db69b6bb0e86 100644 --- a/core/lib/dal/src/models/storage_event.rs +++ b/core/lib/dal/src/models/storage_event.rs @@ -1,5 +1,6 @@ use zksync_types::{ - api::{L2ToL1Log, Log}, + api, + l2_to_l1_log::{self, UserL2ToL1Log}, web3::{Bytes, Index}, Address, H256, U256, U64, }; @@ -19,10 +20,11 @@ pub struct StorageWeb3Log { pub tx_index_in_block: i32, pub event_index_in_block: i32, pub event_index_in_tx: i32, + pub block_timestamp: Option<i64>, } -impl From<StorageWeb3Log> for Log { - fn from(log: StorageWeb3Log) -> Log { +impl From<StorageWeb3Log> for api::Log { + fn from(log: StorageWeb3Log) -> api::Log { let topics = vec![log.topic1, log.topic2, log.topic3, log.topic4] .into_iter() .filter_map(|topic| { @@ -33,7 +35,7 @@ } }) .collect(); - Log { + api::Log { address: Address::from_slice(&log.address), topics, data: Bytes(log.value), @@ -46,13 +48,13 @@ transaction_log_index: Some(U256::from(log.event_index_in_tx as u32)), log_type: None, removed: Some(false), + block_timestamp: log.block_timestamp.map(|t| (t as u64).into()), } } } #[derive(sqlx::FromRow, Debug, Clone)] pub struct StorageL2ToL1Log { - pub block_hash: Option<Vec<u8>>, pub miniblock_number: i64, pub l1_batch_number: Option<i64>, pub log_index_in_miniblock: i32, pub log_index_in_tx: i32, pub tx_hash: Vec<u8>, @@ -67,10 +69,10 @@ pub struct StorageL2ToL1Log { pub value: Vec<u8>, } -impl From<StorageL2ToL1Log> for L2ToL1Log { - fn from(log: StorageL2ToL1Log) -> L2ToL1Log { - L2ToL1Log { - block_hash: log.block_hash.map(|hash| H256::from_slice(&hash)), +impl From<StorageL2ToL1Log> for api::L2ToL1Log { + fn from(log: StorageL2ToL1Log) -> api::L2ToL1Log { + api::L2ToL1Log { + block_hash: None, block_number: (log.miniblock_number as u32).into(), l1_batch_number: (log.l1_batch_number).map(|n| (n as u32).into()), log_index: (log.log_index_in_miniblock as u32).into(), @@ -86,3 +88,22 @@ } } } + +impl From<StorageL2ToL1Log> for l2_to_l1_log::L2ToL1Log { + fn from(log: StorageL2ToL1Log) -> l2_to_l1_log::L2ToL1Log { + l2_to_l1_log::L2ToL1Log { + shard_id: (log.shard_id as u32).try_into().unwrap(), + is_service: log.is_service, + tx_number_in_block:
(log.tx_index_in_l1_batch as u32).try_into().unwrap(), + sender: Address::from_slice(&log.sender), + key: H256::from_slice(&log.key), + value: H256::from_slice(&log.value), + } + } +} + +impl From<StorageL2ToL1Log> for l2_to_l1_log::UserL2ToL1Log { + fn from(log: StorageL2ToL1Log) -> l2_to_l1_log::UserL2ToL1Log { + UserL2ToL1Log(log.into()) + } +} diff --git a/core/lib/dal/src/models/storage_log.rs index ef3a018f9e46..055f37cde559 100644 --- a/core/lib/dal/src/models/storage_log.rs +++ b/core/lib/dal/src/models/storage_log.rs @@ -12,8 +12,8 @@ pub struct DbInitialWrite { #[derive(Debug, PartialEq)] pub struct DbStorageLog { pub hashed_key: H256, - pub address: H160, - pub key: H256, + pub address: Option<H160>, + pub key: Option<H256>, pub value: H256, pub operation_number: u64, pub l2_block_number: L2BlockNumber, } diff --git a/core/lib/dal/src/models/storage_transaction.rs index 1dfd5f4b6a0e..31a182a7eca0 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -1,6 +1,7 @@ use std::{convert::TryInto, str::FromStr}; use bigdecimal::Zero; +use serde_json::Value; use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc}; use zksync_types::{ api::{self, TransactionDetails, TransactionReceipt, TransactionStatus}, @@ -295,7 +296,7 @@ impl From<StorageTransaction> for Transaction { let hash = H256::from_slice(&tx.hash); let execute = serde_json::from_value::<Execute>(tx.data.clone()) .unwrap_or_else(|_| panic!("invalid json in database for tx {:?}", hash)); - let received_timestamp_ms = tx.received_at.timestamp_millis() as u64; + let received_timestamp_ms = tx.received_at.and_utc().timestamp_millis() as u64; match tx.tx_format { Some(t) if t == i32::from(PRIORITY_OPERATION_L2_TX_TYPE) => Transaction { common_data: ExecuteTransactionCommon::L1(tx.into()), @@ -336,6 +337,7 @@ pub(crate) struct StorageTransactionReceipt { pub effective_gas_price: Option<BigDecimal>, pub contract_address: Option<Vec<u8>>, pub initiator_address: Vec<u8>, + pub block_timestamp: Option<i64>, } impl From<StorageTransactionReceipt> for TransactionReceipt { @@ -396,6 +398,13 @@ impl From<StorageTransactionReceipt> for TransactionReceipt { } } +/// Details of the transaction execution. +#[derive(Debug, Clone, sqlx::FromRow)] +pub struct StorageTransactionExecutionInfo { + /// This is an opaque JSON field, with VM version specific contents.
+ pub execution_info: Value, +} + #[derive(Debug, Clone, sqlx::FromRow)] pub(crate) struct StorageTransactionDetails { pub is_priority: bool, diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 040b4246604f..cf1437ff411c 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -3,7 +3,9 @@ use std::time::Duration; use strum::{Display, EnumString}; use zksync_db_connection::{ - connection::Connection, error::DalResult, instrument::Instrumented, + connection::Connection, + error::DalResult, + instrument::{InstrumentExt, Instrumented}, utils::pg_interval_from_duration, }; use zksync_types::L1BatchNumber; @@ -17,8 +19,8 @@ pub struct ProofGenerationDal<'a, 'c> { #[derive(Debug, EnumString, Display)] enum ProofGenerationJobStatus { - #[strum(serialize = "ready_to_be_proven")] - ReadyToBeProven, + #[strum(serialize = "unpicked")] + Unpicked, #[strum(serialize = "picked_by_prover")] PickedByProver, #[strum(serialize = "generated")] @@ -46,8 +48,16 @@ impl ProofGenerationDal<'_, '_> { l1_batch_number FROM proof_generation_details + LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number WHERE - status = 'ready_to_be_proven' + ( + vm_run_data_blob_url IS NOT NULL + AND proof_gen_data_blob_url IS NOT NULL + AND l1_batches.hash IS NOT NULL + AND l1_batches.aux_data_hash IS NOT NULL + AND l1_batches.meta_parameters_hash IS NOT NULL + AND status = 'unpicked' + ) OR ( status = 'picked_by_prover' AND prover_taken_at < NOW() - $1::INTERVAL @@ -56,8 +66,6 @@ impl ProofGenerationDal<'_, '_> { l1_batch_number ASC LIMIT 1 - FOR UPDATE - SKIP LOCKED ) RETURNING proof_generation_details.l1_batch_number @@ -110,26 +118,64 @@ impl ProofGenerationDal<'_, '_> { Ok(()) } - pub async fn insert_proof_generation_details( + pub async fn save_vm_runner_artifacts_metadata( &mut self, - block_number: L1BatchNumber, + batch_number: L1BatchNumber, + vm_run_data_blob_url: &str, + ) -> DalResult<()> { + let batch_number = i64::from(batch_number.0); + let query = sqlx::query!( + r#" + UPDATE proof_generation_details + SET + vm_run_data_blob_url = $1, + updated_at = NOW() + WHERE + l1_batch_number = $2 + "#, + vm_run_data_blob_url, + batch_number + ); + let instrumentation = Instrumented::new("save_proof_artifacts_metadata") + .with_arg("vm_run_data_blob_url", &vm_run_data_blob_url) + .with_arg("l1_batch_number", &batch_number); + let result = instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + if result.rows_affected() == 0 { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "Cannot save vm_run_data_blob_url for a batch number {} that does not exist", + batch_number + )); + return Err(err); + } + + Ok(()) + } + + pub async fn save_merkle_paths_artifacts_metadata( + &mut self, + batch_number: L1BatchNumber, proof_gen_data_blob_url: &str, ) -> DalResult<()> { - let l1_batch_number = i64::from(block_number.0); + let batch_number = i64::from(batch_number.0); let query = sqlx::query!( r#" - INSERT INTO - proof_generation_details (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at) - VALUES - ($1, 'ready_to_be_proven', $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING + UPDATE proof_generation_details + SET + proof_gen_data_blob_url = $1, + updated_at = NOW() + WHERE + l1_batch_number = $2 "#, - l1_batch_number, proof_gen_data_blob_url, + batch_number ); - let instrumentation = Instrumented::new("insert_proof_generation_details") - .with_arg("l1_batch_number", 
&l1_batch_number) - .with_arg("proof_gen_data_blob_url", &proof_gen_data_blob_url); + let instrumentation = Instrumented::new("save_proof_artifacts_metadata") + .with_arg("proof_gen_data_blob_url", &proof_gen_data_blob_url) + .with_arg("l1_batch_number", &batch_number); let result = instrumentation .clone() .with(query) @@ -137,8 +183,8 @@ impl ProofGenerationDal<'_, '_> { .await?; if result.rows_affected() == 0 { let err = instrumentation.constraint_error(anyhow::anyhow!( - "Cannot save proof_blob_url for a batch number {} that does not exist", - l1_batch_number + "Cannot save proof_gen_data_blob_url for a batch number {} that does not exist", + batch_number )); return Err(err); } @@ -146,6 +192,36 @@ impl ProofGenerationDal<'_, '_> { Ok(()) } + /// The caller should ensure that `l1_batch_number` exists in the database. + pub async fn insert_proof_generation_details( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> DalResult<()> { + let result = sqlx::query!( + r#" + INSERT INTO + proof_generation_details (l1_batch_number, status, created_at, updated_at) + VALUES + ($1, 'unpicked', NOW(), NOW()) + ON CONFLICT (l1_batch_number) DO NOTHING + "#, + i64::from(l1_batch_number.0), + ) + .instrument("insert_proof_generation_details") + .with_arg("l1_batch_number", &l1_batch_number) + .report_latency() + .execute(self.storage) + .await?; + + if result.rows_affected() == 0 { + // Not an error: we may call `insert_proof_generation_details()` from multiple full trees instantiated + // for the same node. Unlike tree data, we don't particularly care about correspondence of `proof_gen_data_blob_url` across calls, + // so just log this fact and carry on. + tracing::debug!("L1 batch #{l1_batch_number}: proof generation data wasn't updated as it's already present"); + } + Ok(()) + } + pub async fn mark_proof_generation_job_as_skipped( &mut self, block_number: L1BatchNumber, @@ -191,7 +267,7 @@ impl ProofGenerationDal<'_, '_> { FROM proof_generation_details WHERE - status = 'ready_to_be_proven' + status = 'unpicked' ORDER BY l1_batch_number ASC LIMIT @@ -229,3 +305,124 @@ impl ProofGenerationDal<'_, '_> { Ok(result) } } + +#[cfg(test)] +mod tests { + use zksync_types::{ + block::L1BatchTreeData, commitment::L1BatchCommitmentArtifacts, ProtocolVersion, H256, + }; + + use super::*; + use crate::{tests::create_l1_batch_header, ConnectionPool, CoreDal}; + + #[tokio::test] + async fn proof_generation_workflow() { + let pool = ConnectionPool::<Core>::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + conn.blocks_dal() + .insert_mock_l1_batch(&create_l1_batch_header(1)) + .await + .unwrap(); + + let unpicked_l1_batch = conn + .proof_generation_dal() + .get_oldest_unpicked_batch() + .await + .unwrap(); + assert_eq!(unpicked_l1_batch, None); + + conn.proof_generation_dal() + .insert_proof_generation_details(L1BatchNumber(1)) + .await + .unwrap(); + + let unpicked_l1_batch = conn + .proof_generation_dal() + .get_oldest_unpicked_batch() + .await + .unwrap(); + assert_eq!(unpicked_l1_batch, Some(L1BatchNumber(1))); + + // Calling the method multiple times should work fine.
+ conn.proof_generation_dal() + .insert_proof_generation_details(L1BatchNumber(1)) + .await + .unwrap(); + conn.proof_generation_dal() + .save_vm_runner_artifacts_metadata(L1BatchNumber(1), "vm_run") + .await + .unwrap(); + conn.proof_generation_dal() + .save_merkle_paths_artifacts_metadata(L1BatchNumber(1), "data") + .await + .unwrap(); + conn.blocks_dal() + .save_l1_batch_tree_data( + L1BatchNumber(1), + &L1BatchTreeData { + hash: H256::zero(), + rollup_last_leaf_index: 123, + }, + ) + .await + .unwrap(); + conn.blocks_dal() + .save_l1_batch_commitment_artifacts( + L1BatchNumber(1), + &L1BatchCommitmentArtifacts::default(), + ) + .await + .unwrap(); + + let unpicked_l1_batch = conn + .proof_generation_dal() + .get_oldest_unpicked_batch() + .await + .unwrap(); + assert_eq!(unpicked_l1_batch, Some(L1BatchNumber(1))); + + let picked_l1_batch = conn + .proof_generation_dal() + .get_next_block_to_be_proven(Duration::MAX) + .await + .unwrap(); + assert_eq!(picked_l1_batch, Some(L1BatchNumber(1))); + let unpicked_l1_batch = conn + .proof_generation_dal() + .get_oldest_unpicked_batch() + .await + .unwrap(); + assert_eq!(unpicked_l1_batch, None); + + // Check that with small enough processing timeout, the L1 batch can be picked again + let picked_l1_batch = conn + .proof_generation_dal() + .get_next_block_to_be_proven(Duration::ZERO) + .await + .unwrap(); + assert_eq!(picked_l1_batch, Some(L1BatchNumber(1))); + + conn.proof_generation_dal() + .save_proof_artifacts_metadata(L1BatchNumber(1), "proof") + .await + .unwrap(); + + let picked_l1_batch = conn + .proof_generation_dal() + .get_next_block_to_be_proven(Duration::MAX) + .await + .unwrap(); + assert_eq!(picked_l1_batch, None); + let unpicked_l1_batch = conn + .proof_generation_dal() + .get_oldest_unpicked_batch() + .await + .unwrap(); + assert_eq!(unpicked_l1_batch, None); + } +} diff --git a/core/lib/dal/src/pruning_dal/mod.rs b/core/lib/dal/src/pruning_dal/mod.rs index 7f30af034e2b..0d1584ebba36 100644 --- a/core/lib/dal/src/pruning_dal/mod.rs +++ b/core/lib/dal/src/pruning_dal/mod.rs @@ -336,6 +336,7 @@ impl PruningDal<'_, '_> { DELETE FROM storage_logs USING new_logs WHERE storage_logs.hashed_key = new_logs.hashed_key + AND storage_logs.miniblock_number <= $2 AND (storage_logs.miniblock_number, storage_logs.operation_number) < (new_logs.miniblock_number, new_logs.operation_number) "#, i64::from(l2_blocks_to_prune.start().0), diff --git a/core/lib/dal/src/pruning_dal/tests.rs b/core/lib/dal/src/pruning_dal/tests.rs index 61b5766b93e7..0999e2be1642 100644 --- a/core/lib/dal/src/pruning_dal/tests.rs +++ b/core/lib/dal/src/pruning_dal/tests.rs @@ -1,22 +1,18 @@ use std::ops; -use zksync_contracts::BaseSystemContractsHashes; use zksync_db_connection::connection::Connection; use zksync_types::{ - block::L1BatchHeader, - fee::TransactionExecutionMetrics, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - tx::IncludedTxLocation, - AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersion, - ProtocolVersionId, StorageKey, StorageLog, H256, + fee::TransactionExecutionMetrics, tx::IncludedTxLocation, AccountTreeId, Address, + L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersion, ProtocolVersionId, StorageKey, + StorageLog, H256, }; use super::*; use crate::{ storage_logs_dal::DbStorageLog, tests::{ - create_l2_block_header, mock_execution_result, mock_l2_to_l1_log, mock_l2_transaction, - mock_vm_event, + create_l1_batch_header, create_l2_block_header, create_l2_to_l1_log, mock_execution_result, + mock_l2_transaction, 
mock_vm_event, }, ConnectionPool, Core, CoreDal, }; @@ -44,16 +40,16 @@ async fn insert_l2_to_l1_logs(conn: &mut Connection<'_, Core>, l2_block_number: tx_index_in_l2_block: 0, tx_initiator_address: Address::default(), }; - let first_logs = [mock_l2_to_l1_log(), mock_l2_to_l1_log()]; + let first_logs = [create_l2_to_l1_log(0, 0), create_l2_to_l1_log(0, 0)]; let second_location = IncludedTxLocation { tx_hash: H256([2; 32]), tx_index_in_l2_block: 1, tx_initiator_address: Address::default(), }; let second_logs = vec![ - mock_l2_to_l1_log(), - mock_l2_to_l1_log(), - mock_l2_to_l1_log(), + create_l2_to_l1_log(0, 0), + create_l2_to_l1_log(0, 0), + create_l2_to_l1_log(0, 0), ]; let all_logs = vec![ (first_location, first_logs.iter().collect()), @@ -89,25 +85,10 @@ async fn insert_events(conn: &mut Connection<'_, Core>, l2_block_number: L2Block } async fn insert_l1_batch(conn: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber) { - let mut header = L1BatchHeader::new( - l1_batch_number, - 100, - BaseSystemContractsHashes { - bootloader: H256::repeat_byte(1), - default_aa: H256::repeat_byte(42), - }, - ProtocolVersionId::latest(), - ); + let mut header = create_l1_batch_header(*l1_batch_number); header.l1_tx_count = 3; header.l2_tx_count = 5; - header.l2_to_l1_logs.push(UserL2ToL1Log(L2ToL1Log { - shard_id: 0, - is_service: false, - tx_number_in_block: 2, - sender: Address::repeat_byte(2), - key: H256::repeat_byte(3), - value: H256::zero(), - })); + header.l2_to_l1_logs.push(create_l2_to_l1_log(2, 2)); header.l2_to_l1_messages.push(vec![22; 22]); header.l2_to_l1_messages.push(vec![33; 33]); diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs index fef3ee5b7198..b076240173b1 100644 --- a/core/lib/dal/src/snapshots_creator_dal.rs +++ b/core/lib/dal/src/snapshots_creator_dal.rs @@ -55,9 +55,74 @@ impl SnapshotsCreatorDal<'_, '_> { let storage_logs = sqlx::query!( r#" SELECT - storage_logs.key AS "key!", + storage_logs.hashed_key AS "hashed_key!", storage_logs.value AS "value!", + storage_logs.miniblock_number AS "miniblock_number!", + initial_writes.l1_batch_number AS "l1_batch_number!", + initial_writes.index + FROM + ( + SELECT + hashed_key, + MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op + FROM + storage_logs + WHERE + miniblock_number <= $1 + AND hashed_key >= $3 + AND hashed_key <= $4 + GROUP BY + hashed_key + ORDER BY + hashed_key + ) AS keys + INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key + AND storage_logs.miniblock_number = keys.op[1] + AND storage_logs.operation_number = keys.op[2] + INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key + WHERE + initial_writes.l1_batch_number <= $2 + "#, + i64::from(l2_block_number.0), + i64::from(l1_batch_number.0), + hashed_keys_range.start().as_bytes(), + hashed_keys_range.end().as_bytes() + ) + .instrument("get_storage_logs_chunk") + .with_arg("l2_block_number", &l2_block_number) + .with_arg("min_hashed_key", &hashed_keys_range.start()) + .with_arg("max_hashed_key", &hashed_keys_range.end()) + .report_latency() + .expect_slow_query() + .fetch_all(self.storage) + .await? + .iter() + .map(|row| SnapshotStorageLog { + key: H256::from_slice(&row.hashed_key), + value: H256::from_slice(&row.value), + l1_batch_number_of_initial_write: L1BatchNumber(row.l1_batch_number as u32), + enumeration_index: row.index as u64, + }) + .collect(); + Ok(storage_logs) + } + + /// Same as [`Self::get_storage_logs_chunk()`], but returns full keys. 
+ #[deprecated( + note = "will fail if called on a node restored from a v1 snapshot; use `get_storage_logs_chunk()` instead" + )] + pub async fn get_storage_logs_chunk_with_key_preimages( + &mut self, + l2_block_number: L2BlockNumber, + l1_batch_number: L1BatchNumber, + hashed_keys_range: std::ops::RangeInclusive<H256>, + ) -> DalResult<Vec<SnapshotStorageLog<StorageKey>>> { + let storage_logs = sqlx::query!( + r#" + SELECT storage_logs.address AS "address!", + storage_logs.key AS "key!", + storage_logs.value AS "value!", storage_logs.miniblock_number AS "miniblock_number!", initial_writes.l1_batch_number AS "l1_batch_number!", initial_writes.index @@ -169,6 +234,7 @@ mod tests { .unwrap(); let mut written_keys: Vec<_> = logs.iter().map(|log| log.key).collect(); written_keys.sort_unstable(); + let written_keys: Vec<_> = written_keys.iter().map(StorageKey::hashed_key).collect(); conn.storage_logs_dedup_dal() .insert_initial_writes(L1BatchNumber(1), &written_keys) .await @@ -190,7 +256,7 @@ ); StorageLog::new_write_log(key, H256::repeat_byte(1)) }); - let new_written_keys: Vec<_> = new_logs.clone().map(|log| log.key).collect(); + let new_written_keys: Vec<_> = new_logs.clone().map(|log| log.key.hashed_key()).collect(); let updated_logs = logs.iter().step_by(3).map(|&log| StorageLog { value: H256::repeat_byte(23), ..log @@ -238,7 +304,7 @@ .unwrap(); assert_eq!(all_logs.len(), expected_logs.len()); for (log, expected_log) in all_logs.iter().zip(expected_logs) { - assert_eq!(log.key, expected_log.key); + assert_eq!(log.key, expected_log.key.hashed_key()); assert_eq!(log.value, expected_log.value); assert_eq!(log.l1_batch_number_of_initial_write, l1_batch_number); } @@ -253,7 +319,7 @@ .unwrap(); assert_eq!(logs.len(), chunk.len()); for (log, expected_log) in logs.iter().zip(chunk) { - assert_eq!(log.key, expected_log.key); + assert_eq!(log.key, expected_log.key.hashed_key()); assert_eq!(log.value, expected_log.value); } } @@ -282,7 +348,7 @@ .await .unwrap(); conn.storage_logs_dedup_dal() - .insert_initial_writes(L1BatchNumber(2), &[key]) + .insert_initial_writes(L1BatchNumber(2), &[key.hashed_key()]) .await .unwrap(); @@ -307,7 +373,7 @@ .await .unwrap(); assert_eq!(logs.len(), 1); - assert_eq!(logs[0].key, key); + assert_eq!(logs[0].key, key.hashed_key()); assert_eq!(logs[0].value, real_write.value); assert_eq!(logs[0].l1_batch_number_of_initial_write, L1BatchNumber(2)); } diff --git a/core/lib/dal/src/storage_logs_dal.rs index 052e93370333..d5de66037b49 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -72,10 +72,11 @@ impl StorageLogsDal<'_, '_> { copy.send(buffer.as_bytes()).await } - pub async fn insert_storage_logs_from_snapshot( + #[deprecated(note = "Will be removed in favor of `insert_storage_logs_from_snapshot()`")] + pub async fn insert_storage_logs_with_preimages_from_snapshot( &mut self, l2_block_number: L2BlockNumber, - snapshot_storage_logs: &[SnapshotStorageLog], + snapshot_storage_logs: &[SnapshotStorageLog<StorageKey>], ) -> DalResult<()> { let storage_logs_len = snapshot_storage_logs.len(); let copy = CopyStatement::new( @@ -112,6 +113,44 @@ copy.send(buffer.as_bytes()).await }
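`SnapshotStorageLog` is generic over its key type here: the deprecated method still consumes `SnapshotStorageLog<StorageKey>` (full address/slot preimages), while the replacement below works with the default hashed-key form. Roughly (a simplified sketch; the real definition lives in `zksync_types::snapshots`):

    pub struct SnapshotStorageLog<K = H256> {
        pub key: K, // hashed key by default, or a full `StorageKey` preimage
        pub value: H256,
        pub l1_batch_number_of_initial_write: L1BatchNumber,
        pub enumeration_index: u64,
    }

Nodes restored from a v1 snapshot only have hashed keys, which is why the preimage-based APIs are being phased out.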
+ pub async fn insert_storage_logs_from_snapshot( + &mut self, + l2_block_number: L2BlockNumber, + snapshot_storage_logs: &[SnapshotStorageLog], + ) -> DalResult<()> { + let storage_logs_len = snapshot_storage_logs.len(); + let copy = CopyStatement::new( + "COPY storage_logs( + hashed_key, value, operation_number, tx_hash, miniblock_number, + created_at, updated_at + ) + FROM STDIN WITH (DELIMITER '|')", + ) + .instrument("insert_storage_logs_from_snapshot") + .with_arg("l2_block_number", &l2_block_number) + .with_arg("storage_logs.len", &storage_logs_len) + .start(self.storage) + .await?; + + let mut buffer = String::new(); + let now = Utc::now().naive_utc().to_string(); + for log in snapshot_storage_logs.iter() { + write_str!( + &mut buffer, + r"\\x{hashed_key:x}|\\x{value:x}|", + hashed_key = log.key, + value = log.value + ); + writeln_str!( + &mut buffer, + r"{}|\\x{:x}|{l2_block_number}|{now}|{now}", + log.enumeration_index, + H256::zero() + ); + } + copy.send(buffer.as_bytes()).await + } + pub async fn append_storage_logs( &mut self, block_number: L2BlockNumber, @@ -299,17 +338,16 @@ impl StorageLogsDal<'_, '_> { Ok(deployment_data.collect()) } - /// Returns latest values for all [`StorageKey`]s written to in the specified L1 batch + /// Returns latest values for all slots written to in the specified L1 batch /// judging by storage logs (i.e., not taking deduplication logic into account). pub async fn get_touched_slots_for_l1_batch( &mut self, l1_batch_number: L1BatchNumber, - ) -> DalResult<HashMap<StorageKey, H256>> { + ) -> DalResult<HashMap<H256, H256>> { let rows = sqlx::query!( r#" SELECT - address, - key, + hashed_key, value FROM storage_logs WHERE @@ -340,6 +378,57 @@ impl StorageLogsDal<'_, '_> { .fetch_all(self.storage) .await?; + let touched_slots = rows.into_iter().map(|row| { + ( + H256::from_slice(&row.hashed_key), + H256::from_slice(&row.value), + ) + }); + Ok(touched_slots.collect()) + }
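Since the map returned by `get_touched_slots_for_l1_batch()` is now keyed by hashed keys, callers that still hold a `StorageKey` preimage look values up by hashing it first, mirroring the test changes further below; a sketch:

    let storage_key = StorageKey::new(AccountTreeId::new(address), slot);
    // `hashed_key()` is a deterministic digest of the (address, slot) pair.
    let value = touched_slots.get(&storage_key.hashed_key());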
+ + /// Same as [`Self::get_touched_slots_for_l1_batch()`], but loads key preimages instead of hashed keys. + /// Correspondingly, this method is safe to call for locally executed L1 batches, for which key preimages + /// are known; otherwise, it will error. + pub async fn get_touched_slots_for_executed_l1_batch( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> DalResult<HashMap<StorageKey, H256>> { + let rows = sqlx::query!( + r#" + SELECT + address AS "address!", + key AS "key!", + value + FROM + storage_logs + WHERE + miniblock_number BETWEEN ( + SELECT + MIN(number) + FROM + miniblocks + WHERE + l1_batch_number = $1 + ) AND ( + SELECT + MAX(number) + FROM + miniblocks + WHERE + l1_batch_number = $1 + ) + ORDER BY + miniblock_number, + operation_number + "#, + i64::from(l1_batch_number.0) + ) + .instrument("get_touched_slots_for_executed_l1_batch") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_all(self.storage) + .await?; + let touched_slots = rows.into_iter().map(|row| { let key = StorageKey::new( AccountTreeId::new(Address::from_slice(&row.address)), @@ -578,8 +667,8 @@ impl StorageLogsDal<'_, '_> { rows.into_iter() .map(|row| DbStorageLog { hashed_key: H256::from_slice(&row.hashed_key), - address: H160::from_slice(&row.address), - key: H256::from_slice(&row.key), + address: row.address.as_deref().map(H160::from_slice), + key: row.key.as_deref().map(H256::from_slice), value: H256::from_slice(&row.value), operation_number: row.operation_number as u64, l2_block_number: L2BlockNumber(row.miniblock_number as u32), @@ -720,7 +809,9 @@ impl StorageLogsDal<'_, '_> { #[cfg(test)] mod tests { use zksync_contracts::BaseSystemContractsHashes; - use zksync_types::{block::L1BatchHeader, ProtocolVersion, ProtocolVersionId}; + use zksync_types::{ + block::L1BatchHeader, AccountTreeId, ProtocolVersion, ProtocolVersionId, StorageKey, + }; use super::*; use crate::{tests::create_l2_block_header, ConnectionPool, Core}; @@ -773,8 +864,11 @@ mod tests { .await .unwrap(); assert_eq!(touched_slots.len(), 2); - assert_eq!(touched_slots[&first_key], H256::repeat_byte(1)); - assert_eq!(touched_slots[&second_key], H256::repeat_byte(2)); + assert_eq!(touched_slots[&first_key.hashed_key()], H256::repeat_byte(1)); + assert_eq!( + touched_slots[&second_key.hashed_key()], + H256::repeat_byte(2) + ); // Add more logs and check log ordering. let third_log = StorageLog::new_write_log(first_key, H256::repeat_byte(3)); @@ -790,8 +884,11 @@ mod tests { .await .unwrap(); assert_eq!(touched_slots.len(), 2); - assert_eq!(touched_slots[&first_key], H256::repeat_byte(3)); - assert_eq!(touched_slots[&second_key], H256::repeat_byte(2)); + assert_eq!(touched_slots[&first_key.hashed_key()], H256::repeat_byte(3)); + assert_eq!( + touched_slots[&second_key.hashed_key()], + H256::repeat_byte(2) + ); test_revert(&mut conn, first_key, second_key).await; } @@ -861,7 +958,7 @@ mod tests { }) .collect(); insert_l2_block(&mut conn, 1, logs.clone()).await; - let written_keys: Vec<_> = logs.iter().map(|log| log.key).collect(); + let written_keys: Vec<_> = logs.iter().map(|log| log.key.hashed_key()).collect(); conn.storage_logs_dedup_dal() .insert_initial_writes(L1BatchNumber(1), &written_keys) .await .unwrap(); @@ -874,7 +971,10 @@ mod tests { }) .collect(); insert_l2_block(&mut conn, 2, new_logs.clone()).await; - let new_written_keys: Vec<_> = new_logs[5..].iter().map(|log| log.key).collect(); + let new_written_keys: Vec<_> = new_logs[5..]
+ .iter() + .map(|log| log.key.hashed_key()) + .collect(); conn.storage_logs_dedup_dal() .insert_initial_writes(L1BatchNumber(2), &new_written_keys) .await @@ -931,8 +1031,9 @@ mod tests { let initial_keys: Vec<_> = logs .iter() .filter_map(|log| { - (!log.value.is_zero() && !non_initial.contains(&log.key.hashed_key())) - .then_some(log.key) + let hashed_key = log.key.hashed_key(); + (!log.value.is_zero() && !non_initial.contains(&hashed_key)) + .then_some(hashed_key) }) .collect(); @@ -1016,6 +1117,7 @@ mod tests { let mut initial_keys: Vec<_> = logs.iter().map(|log| log.key).collect(); initial_keys.sort_unstable(); + let initial_keys: Vec<_> = initial_keys.iter().map(StorageKey::hashed_key).collect(); conn.storage_logs_dedup_dal() .insert_initial_writes(L1BatchNumber(1), &initial_keys) .await diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs index 6df54c54fc51..02049f3e9ade 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -68,14 +68,10 @@ impl StorageLogsDedupDal<'_, '_> { let mut bytes: Vec<u8> = Vec::new(); let now = Utc::now().naive_utc().to_string(); - for log in snapshot_storage_logs.iter() { + for log in snapshot_storage_logs { let row = format!( "\\\\x{:x}|{}|{}|{}|{}\n", - log.key.hashed_key(), - log.enumeration_index, - log.l1_batch_number_of_initial_write, - now, - now, + log.key, log.enumeration_index, log.l1_batch_number_of_initial_write, now, now, ); bytes.extend_from_slice(row.as_bytes()); } @@ -85,12 +81,9 @@ impl StorageLogsDedupDal<'_, '_> { pub async fn insert_initial_writes( &mut self, l1_batch_number: L1BatchNumber, - written_storage_keys: &[StorageKey], + written_hashed_keys: &[H256], ) -> DalResult<()> { - let hashed_keys: Vec<_> = written_storage_keys - .iter() - .map(|key| StorageKey::raw_hashed_key(key.address(), key.key()).to_vec()) - .collect(); + let hashed_keys: Vec<_> = written_hashed_keys.iter().map(H256::as_bytes).collect(); let last_index = self.max_enumeration_index().await?.unwrap_or(0); let indices: Vec<_> = ((last_index + 1)..=(last_index + hashed_keys.len() as u64)) @@ -110,7 +103,7 @@ impl StorageLogsDedupDal<'_, '_> { FROM UNNEST($1::bytea[], $2::BIGINT[]) AS u (hashed_key, INDEX) "#, - &hashed_keys, + &hashed_keys as &[&[u8]], &indices, i64::from(l1_batch_number.0) ) @@ -343,8 +336,8 @@ mod tests { let account = AccountTreeId::new(Address::repeat_byte(1)); let initial_writes = [ - StorageKey::new(account, H256::zero()), - StorageKey::new(account, H256::repeat_byte(1)), + StorageKey::new(account, H256::zero()).hashed_key(), + StorageKey::new(account, H256::repeat_byte(1)).hashed_key(), ]; conn.storage_logs_dedup_dal() .insert_initial_writes(L1BatchNumber(0), &initial_writes) @@ -359,8 +352,8 @@ mod tests { assert_eq!(max_index, Some(2)); let initial_writes = [ - StorageKey::new(account, H256::repeat_byte(2)), - StorageKey::new(account, H256::repeat_byte(3)), + StorageKey::new(account, H256::repeat_byte(2)).hashed_key(), + StorageKey::new(account, H256::repeat_byte(3)).hashed_key(), ]; conn.storage_logs_dedup_dal() .insert_initial_writes(L1BatchNumber(1), &initial_writes)
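Correspondingly, callers of `insert_initial_writes()` now pre-hash keys themselves; a minimal sketch of the updated calling convention (mirrors the test changes above):

    let hashed: Vec<H256> = written_storage_keys
        .iter()
        .map(StorageKey::hashed_key)
        .collect();
    conn.storage_logs_dedup_dal()
        .insert_initial_writes(L1BatchNumber(1), &hashed)
        .await?;

Enumeration indices are still assigned inside the DAL, contiguously from the current maximum.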
.get_historical_value_unchecked(&nonce_key, block_number) + .get_historical_value_unchecked(nonce_key.hashed_key(), block_number) .await?; let full_nonce = h256_to_u256(nonce_value); Ok(decompose_full_nonce(full_nonce).0) @@ -66,13 +66,14 @@ impl StorageWeb3Dal<'_, '_> { ) -> DalResult { let key = storage_key_for_standard_token_balance(token_id, account_id.address()); let balance = self - .get_historical_value_unchecked(&key, block_number) + .get_historical_value_unchecked(key.hashed_key(), block_number) .await?; Ok(h256_to_u256(balance)) } /// Gets the current value for the specified `key`. Uses state of the latest sealed L2 block. /// Returns error if there is no sealed L2 blocks. + // FIXME: propagate hashed_key? pub async fn get_value(&mut self, key: &StorageKey) -> DalResult { let Some(l2_block_number) = self .storage @@ -85,7 +86,7 @@ impl StorageWeb3Dal<'_, '_> { .constraint_error(anyhow::anyhow!("no sealed l2 blocks")); return Err(err); }; - self.get_historical_value_unchecked(key, l2_block_number) + self.get_historical_value_unchecked(key.hashed_key(), l2_block_number) .await } @@ -119,11 +120,9 @@ impl StorageWeb3Dal<'_, '_> { /// It will return the current value if the block is in the future. pub async fn get_historical_value_unchecked( &mut self, - key: &StorageKey, + hashed_key: H256, block_number: L2BlockNumber, ) -> DalResult { - let hashed_key = key.hashed_key(); - sqlx::query!( r#" SELECT @@ -204,9 +203,8 @@ impl StorageWeb3Dal<'_, '_> { pub async fn get_l1_batch_number_for_initial_write( &mut self, - key: &StorageKey, + hashed_key: H256, ) -> DalResult> { - let hashed_key = key.hashed_key(); let row = sqlx::query!( r#" SELECT diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index d5625935fa1b..0ddf36abdbed 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -7,7 +7,7 @@ use zksync_db_connection::{ instrument::{InstrumentExt, Instrumented}, utils::pg_interval_from_duration, }; -use zksync_types::L1BatchNumber; +use zksync_types::{tee_types::TeeType, L1BatchNumber}; use crate::Core; @@ -28,12 +28,6 @@ enum TeeProofGenerationJobStatus { Skipped, } -#[derive(Debug, EnumString, Display)] -pub enum TeeType { - #[strum(serialize = "sgx")] - Sgx, -} - impl TeeProofGenerationDal<'_, '_> { pub async fn get_next_block_to_be_proven( &mut self, diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index c4dab1246552..11f88ba8a70b 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -3,7 +3,7 @@ use std::time::Duration; use zksync_contracts::BaseSystemContractsHashes; use zksync_db_connection::connection_pool::ConnectionPool; use zksync_types::{ - block::{L2BlockHasher, L2BlockHeader}, + block::{L1BatchHeader, L2BlockHasher, L2BlockHeader}, fee::{Fee, TransactionExecutionMetrics}, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, @@ -50,6 +50,17 @@ pub(crate) fn create_l2_block_header(number: u32) -> L2BlockHeader { gas_limit: 0, } } +pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { + L1BatchHeader::new( + L1BatchNumber(number), + 100, + BaseSystemContractsHashes { + bootloader: H256::repeat_byte(1), + default_aa: H256::repeat_byte(42), + }, + ProtocolVersionId::latest(), + ) +} pub(crate) fn mock_l2_transaction() -> L2Tx { let fee = Fee { @@ -172,14 +183,14 @@ pub(crate) fn mock_vm_event(index: u8) -> VmEvent { } } -pub(crate) fn mock_l2_to_l1_log() -> UserL2ToL1Log { +pub(crate) fn 
create_l2_to_l1_log(tx_number_in_block: u16, index: u8) -> UserL2ToL1Log { UserL2ToL1Log(L2ToL1Log { shard_id: 0, is_service: false, - tx_number_in_block: 0, - sender: Address::repeat_byte(0), - key: H256::from_low_u64_be(0), - value: H256::repeat_byte(0), + tx_number_in_block, + sender: Address::repeat_byte(index), + key: H256::from_low_u64_be(u64::from(index)), + value: H256::repeat_byte(index), }) }
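In the `transactions_web3_dal.rs` changes that follow, the receipts query additionally selects `miniblocks.timestamp`, and the code threads that value into every log attached to a receipt, so API responses can expose a block timestamp on logs without an extra query. Schematically (a sketch of the flow, not a verbatim excerpt):

    // One timestamp per receipt row, zipped back onto that receipt's logs.
    for (receipt, block_timestamp) in receipts.iter_mut().zip(block_timestamps) {
        for log in &mut receipt.logs {
            log.block_timestamp = block_timestamp.map(|t| (t as u64).into());
        }
    }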
diff --git a/core/lib/dal/src/transactions_web3_dal.rs index 2d380a8059a6..ff82664109d6 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -16,7 +16,7 @@ use zksync_types::{ use crate::{ models::storage_transaction::{ StorageApiTransaction, StorageTransaction, StorageTransactionDetails, - StorageTransactionReceipt, + StorageTransactionExecutionInfo, StorageTransactionReceipt, }, Core, CoreDal, }; @@ -43,7 +43,7 @@ impl TransactionsWeb3Dal<'_, '_> { // Clarification for first part of the query(`WITH` clause): // Looking for `ContractDeployed` event in the events table // to find the address of deployed contract - let mut receipts: Vec<TransactionReceipt> = sqlx::query_as!( + let st_receipts: Vec<StorageTransactionReceipt> = sqlx::query_as!( StorageTransactionReceipt, r#" WITH @@ -75,7 +75,8 @@ impl TransactionsWeb3Dal<'_, '_> { transactions.gas_limit AS gas_limit, miniblocks.hash AS "block_hash", miniblocks.l1_batch_number AS "l1_batch_number?", - events.topic4 AS "contract_address?" + events.topic4 AS "contract_address?", + miniblocks.timestamp AS "block_timestamp?" FROM transactions JOIN miniblocks ON miniblocks.number = transactions.miniblock_number @@ -93,10 +94,13 @@ impl TransactionsWeb3Dal<'_, '_> { .instrument("get_transaction_receipts") .with_arg("hashes.len", &hashes.len()) .fetch_all(self.storage) - .await? - .into_iter() - .map(Into::into) - .collect(); + .await?; + + let block_timestamps: Vec<Option<i64>> = + st_receipts.iter().map(|x| x.block_timestamp).collect(); + + let mut receipts: Vec<TransactionReceipt> = + st_receipts.into_iter().map(Into::into).collect(); let mut logs = self .storage @@ -110,7 +114,7 @@ impl TransactionsWeb3Dal<'_, '_> { .get_l2_to_l1_logs_by_hashes(hashes) .await?; - for receipt in &mut receipts { + for (receipt, block_timestamp) in receipts.iter_mut().zip(block_timestamps.into_iter()) { let logs_for_tx = logs.remove(&receipt.transaction_hash); if let Some(logs) = logs_for_tx { @@ -119,6 +123,7 @@ impl TransactionsWeb3Dal<'_, '_> { .map(|mut log| { log.block_hash = Some(receipt.block_hash); log.l1_batch_number = receipt.l1_batch_number; + log.block_timestamp = block_timestamp.map(|t| (t as u64).into()); log }) .collect(); @@ -151,6 +156,29 @@ impl TransactionsWeb3Dal<'_, '_> { .await } + pub async fn get_unstable_transaction_execution_info( + &mut self, + hash: H256, + ) -> DalResult<Option<serde_json::Value>> { + let row = sqlx::query_as!( + StorageTransactionExecutionInfo, + r#" + SELECT + transactions.execution_info + FROM + transactions + WHERE + transactions.hash = $1 + "#, + hash.as_bytes() + ) + .instrument("get_unstable_transaction_execution_info") + .with_arg("hash", &hash) + .fetch_optional(self.storage) + .await?; + Ok(row.map(|entry| entry.execution_info)) + } + async fn get_transactions_inner( + &mut self, + selector: TransactionSelector<'_>, @@ -550,6 +578,21 @@ mod tests { .get_transaction_by_hash(H256::zero(), L2ChainId::from(270)) .await; assert!(web3_tx.unwrap().is_none()); + + let execution_info = conn + .transactions_web3_dal() + .get_unstable_transaction_execution_info(tx_hash) + .await + .unwrap() + .expect("Transaction execution info is missing in the DAL"); + + // Check that execution info has at least the circuit statistics field. + // If this assertion fails because the transaction execution info format + // has changed, replace circuit_statistic with any other valid field + assert!( + execution_info.get("circuit_statistic").is_some(), + "Missing circuit_statistics field" + ); } #[tokio::test] diff --git a/core/lib/dal/src/vm_runner_dal.rs index 2d17ff3f9fca..64e378926573 100644 --- a/core/lib/dal/src/vm_runner_dal.rs +++ b/core/lib/dal/src/vm_runner_dal.rs @@ -11,22 +11,22 @@ pub struct VmRunnerDal<'c, 'a> { impl VmRunnerDal<'_, '_> { pub async fn get_protective_reads_latest_processed_batch( &mut self, - default_batch: L1BatchNumber, - ) -> DalResult<L1BatchNumber> { + ) -> DalResult<Option<L1BatchNumber>> { let row = sqlx::query!( r#" SELECT - COALESCE(MAX(l1_batch_number), $1) AS "last_processed_l1_batch!" + MAX(l1_batch_number) AS "last_processed_l1_batch" FROM vm_runner_protective_reads - "#, - default_batch.0 as i32 + WHERE + time_taken IS NOT NULL + "# ) .instrument("get_protective_reads_latest_processed_batch") .report_latency() .fetch_one(self.storage) .await?; - Ok(L1BatchNumber(row.last_processed_l1_batch as u32)) + Ok(row.last_processed_l1_batch.map(|n| L1BatchNumber(n as u32))) } pub async fn get_protective_reads_last_ready_batch( @@ -48,6 +48,8 @@ impl VmRunnerDal<'_, '_> { COALESCE(MAX(l1_batch_number), $1) + $2 AS "last_ready_batch" FROM vm_runner_protective_reads + WHERE + time_taken IS NOT NULL ) SELECT LEAST(last_batch, last_ready_batch) AS "last_ready_batch!"
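Together with the `mark_*_as_processing` / `mark_*_as_completed` methods added below, the `time_taken IS NOT NULL` filter turns each `vm_runner_*` table into a small state machine: a row with `processing_started_at` set but `time_taken` still NULL is in flight, and only completed rows count as processed. A sketch of the intended driver loop (hypothetical caller; error handling elided):

    let last = conn.vm_runner_dal().get_protective_reads_latest_processed_batch().await?;
    let next = last.map_or(L1BatchNumber(0), |n| n + 1);
    conn.vm_runner_dal().mark_protective_reads_batch_as_processing(next).await?;
    // ... run the VM and persist the protective reads for `next` ...
    conn.vm_runner_dal().mark_protective_reads_batch_as_completed(next).await?;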
@@ -65,16 +67,177 @@ impl VmRunnerDal<'_, '_> { Ok(L1BatchNumber(row.last_ready_batch as u32)) } + pub async fn mark_protective_reads_batch_as_processing( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> DalResult<()> { + sqlx::query!( + r#" + INSERT INTO + vm_runner_protective_reads (l1_batch_number, created_at, updated_at, processing_started_at) + VALUES + ($1, NOW(), NOW(), NOW()) + ON CONFLICT (l1_batch_number) DO + UPDATE + SET + updated_at = NOW(), + processing_started_at = NOW() + "#, + i64::from(l1_batch_number.0), + ) + .instrument("mark_protective_reads_batch_as_processing") + .report_latency() + .execute(self.storage) + .await?; + Ok(()) + } + pub async fn mark_protective_reads_batch_as_completed( &mut self, l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + let update_result = sqlx::query!( + r#" + UPDATE vm_runner_protective_reads + SET + time_taken = NOW() - processing_started_at + WHERE + l1_batch_number = $1 + "#, + i64::from(l1_batch_number.0), + ) + .instrument("mark_protective_reads_batch_as_completed") + .report_latency() + .execute(self.storage) + .await?; + if update_result.rows_affected() == 0 { + anyhow::bail!( + "Trying to mark an L1 batch as completed while it is not being processed" + ); + } + Ok(()) + } + + pub async fn delete_protective_reads( + &mut self, + last_batch_to_keep: L1BatchNumber, + ) -> DalResult<()> { + self.delete_protective_reads_inner(Some(last_batch_to_keep)) + .await + } + + async fn delete_protective_reads_inner( + &mut self, + last_batch_to_keep: Option<L1BatchNumber>, + ) -> DalResult<()> { + let l1_batch_number = last_batch_to_keep.map_or(-1, |number| i64::from(number.0)); + sqlx::query!( + r#" + DELETE FROM vm_runner_protective_reads + WHERE + l1_batch_number > $1 + "#, + l1_batch_number + ) + .instrument("delete_protective_reads") + .with_arg("l1_batch_number", &l1_batch_number) + .execute(self.storage) + .await?; + Ok(()) + } + + pub async fn get_bwip_latest_processed_batch(&mut self) -> DalResult<Option<L1BatchNumber>> { + let row = sqlx::query!( + r#" + SELECT + MAX(l1_batch_number) AS "last_processed_l1_batch" + FROM + vm_runner_bwip + WHERE + time_taken IS NOT NULL + "#, + ) + .instrument("get_bwip_latest_processed_batch") + .report_latency() + .fetch_one(self.storage) + .await?; + Ok(row.last_processed_l1_batch.map(|n| L1BatchNumber(n as u32))) + } + + pub async fn get_bwip_last_ready_batch( + &mut self, + default_batch: L1BatchNumber, + window_size: u32, + ) -> DalResult<L1BatchNumber> { + let row = sqlx::query!( + r#" + WITH + available_batches AS ( + SELECT + MAX(number) AS "last_batch" + FROM + l1_batches + ), + processed_batches AS ( + SELECT + COALESCE(MAX(l1_batch_number), $1) + $2 AS "last_ready_batch" + FROM + vm_runner_bwip + WHERE + time_taken IS NOT NULL + ) + SELECT + LEAST(last_batch, last_ready_batch) AS "last_ready_batch!"
+ FROM + available_batches + FULL JOIN processed_batches ON TRUE + "#, + default_batch.0 as i32, + window_size as i32 + ) + .instrument("get_bwip_last_ready_batch") + .report_latency() + .fetch_one(self.storage) + .await?; + Ok(L1BatchNumber(row.last_ready_batch as u32)) + } + + pub async fn mark_bwip_batch_as_processing( + &mut self, + l1_batch_number: L1BatchNumber, ) -> DalResult<()> { sqlx::query!( r#" INSERT INTO - vm_runner_protective_reads (l1_batch_number, created_at, updated_at) + vm_runner_bwip (l1_batch_number, created_at, updated_at, processing_started_at) VALUES - ($1, NOW(), NOW()) + ($1, NOW(), NOW(), NOW()) + ON CONFLICT (l1_batch_number) DO + UPDATE + SET + updated_at = NOW(), + processing_started_at = NOW() + "#, + i64::from(l1_batch_number.0), + ) + .instrument("mark_bwip_batch_as_processing") + .report_latency() + .execute(self.storage) + .await?; + Ok(()) + } + + pub async fn mark_bwip_batch_as_completed( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + let update_result = sqlx::query!( + r#" + UPDATE vm_runner_bwip + SET + time_taken = NOW() - processing_started_at + WHERE + l1_batch_number = $1 "#, i64::from(l1_batch_number.0), ) @@ -82,6 +245,11 @@ impl VmRunnerDal<'_, '_> { .report_latency() .execute(self.storage) .await?; + if update_result.rows_affected() == 0 { + anyhow::bail!( + "Trying to mark an L1 batch as completed while it is not being processed" + ); + } Ok(()) } } diff --git a/core/lib/db_connection/Cargo.toml index 2e929e385991..fa5bb0b20af2 100644 --- a/core/lib/db_connection/Cargo.toml +++ b/core/lib/db_connection/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_db_connection" -version = "0.1.0" +description = "ZKsync Postgres connection wrappers" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -36,3 +37,4 @@ tracing.workspace = true [dev-dependencies] assert_matches.workspace = true +test-casing.workspace = true diff --git a/core/lib/db_connection/src/connection.rs index 99cab4fee179..22a63765b3bf 100644 --- a/core/lib/db_connection/src/connection.rs +++ b/core/lib/db_connection/src/connection.rs @@ -215,6 +215,7 @@ impl<'a, DB: DbMarker> Connection<'a, DB> { Ok(TransactionBuilder { connection: self, is_readonly: false, + isolation_level: None, }) } @@ -280,11 +281,26 @@ impl<'a, DB: DbMarker> Connection<'a, DB> { } } +/// Transaction isolation level. +/// +/// See [Postgres docs](https://www.postgresql.org/docs/14/transaction-iso.html) for details on isolation level semantics. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[non_exhaustive] +pub enum IsolationLevel { + /// "Read committed" isolation level. + ReadCommitted, + /// "Repeatable read" isolation level (aka "snapshot isolation"). + RepeatableRead, + /// Serializable isolation level. + Serializable, +} + /// Builder of transactions allowing to configure transaction characteristics (for now, the readonly status and isolation level). #[derive(Debug)] pub struct TransactionBuilder<'a, 'c, DB: DbMarker> { connection: &'a mut Connection<'c, DB>, is_readonly: bool, + isolation_level: Option<IsolationLevel>, } impl<'a, DB: DbMarker> TransactionBuilder<'a, '_, DB> { @@ -294,12 +310,40 @@ impl<'a, DB: DbMarker> TransactionBuilder<'a, '_, DB> { self }
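With the builder extended below, callers opt into an isolation level explicitly; a sketch of intended usage (hypothetical call site):

    // Read-only snapshot transaction; without `set_isolation` a readonly
    // transaction defaults to REPEATABLE READ, a read-write one to READ COMMITTED.
    let mut transaction = connection
        .transaction_builder()?
        .set_readonly()
        .set_isolation(IsolationLevel::Serializable)
        .build()
        .await?;

Under the hood this issues a single `SET TRANSACTION ISOLATION LEVEL ... [READ ONLY]` statement right after the transaction starts.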
+    /// "read committed" (the default Postgres isolation level) for read-write transactions, and "repeatable read"
+    /// for readonly transactions. Beware that setting a high isolation level for read-write transactions may lead
+    /// to performance degradation and/or isolation-related errors.
+    pub fn set_isolation(mut self, level: IsolationLevel) -> Self {
+        self.isolation_level = Some(level);
+        self
+    }
+
     /// Builds the transaction with the provided characteristics.
     pub async fn build(self) -> DalResult<Transaction<'a, DB>> {
         let mut transaction = self.connection.start_transaction().await?;
+
+        let level = self.isolation_level.unwrap_or(if self.is_readonly {
+            IsolationLevel::RepeatableRead
+        } else {
+            IsolationLevel::ReadCommitted
+        });
+        let level = match level {
+            IsolationLevel::ReadCommitted => "READ COMMITTED",
+            IsolationLevel::RepeatableRead => "REPEATABLE READ",
+            IsolationLevel::Serializable => "SERIALIZABLE",
+        };
+        let mut set_transaction_args = format!(" ISOLATION LEVEL {level}");
+
         if self.is_readonly {
-            sqlx::query("SET TRANSACTION READ ONLY")
+            set_transaction_args += " READ ONLY";
+        }
+
+        if !set_transaction_args.is_empty() {
+            sqlx::query(&format!("SET TRANSACTION{set_transaction_args}"))
                 .instrument("set_transaction_characteristics")
+                .with_arg("isolation_level", &self.isolation_level)
+                .with_arg("readonly", &self.is_readonly)
                 .execute(&mut transaction)
                 .await?;
         }
@@ -309,6 +353,8 @@ impl<'a, DB: DbMarker> TransactionBuilder<'a, '_, DB> {
 
 #[cfg(test)]
 mod tests {
+    use test_casing::test_casing;
+
     use super::*;
 
     #[tokio::test]
@@ -344,17 +390,51 @@ mod tests {
         }
     }
 
+    const ISOLATION_LEVELS: [Option<IsolationLevel>; 4] = [
+        None,
+        Some(IsolationLevel::ReadCommitted),
+        Some(IsolationLevel::RepeatableRead),
+        Some(IsolationLevel::Serializable),
+    ];
+
+    #[test_casing(4, ISOLATION_LEVELS)]
     #[tokio::test]
-    async fn creating_readonly_transaction() {
+    async fn setting_isolation_level_for_transaction(level: Option<IsolationLevel>) {
         let pool = ConnectionPool::<InternalMarker>::constrained_test_pool(1).await;
         let mut connection = pool.connection().await.unwrap();
-        let mut readonly_transaction = connection
-            .transaction_builder()
+        let mut transaction_builder = connection.transaction_builder().unwrap();
+        if let Some(level) = level {
+            transaction_builder = transaction_builder.set_isolation(level);
+        }
+
+        let mut transaction = transaction_builder.build().await.unwrap();
+        assert!(transaction.in_transaction());
+
+        sqlx::query("SELECT COUNT(*) AS \"count?\" FROM miniblocks")
+            .instrument("test")
+            .fetch_optional(&mut transaction)
+            .await
             .unwrap()
-            .set_readonly()
-            .build()
+            .expect("no row returned");
+        // Check that it's possible to execute write statements in the transaction.
+ sqlx::query("DELETE FROM miniblocks") + .instrument("test") + .execute(&mut transaction) .await .unwrap(); + } + + #[test_casing(4, ISOLATION_LEVELS)] + #[tokio::test] + async fn creating_readonly_transaction(level: Option) { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut connection = pool.connection().await.unwrap(); + let mut transaction_builder = connection.transaction_builder().unwrap().set_readonly(); + if let Some(level) = level { + transaction_builder = transaction_builder.set_isolation(level); + } + + let mut readonly_transaction = transaction_builder.build().await.unwrap(); assert!(readonly_transaction.in_transaction()); sqlx::query("SELECT COUNT(*) AS \"count?\" FROM miniblocks") diff --git a/core/lib/db_connection/src/healthcheck.rs b/core/lib/db_connection/src/healthcheck.rs deleted file mode 100644 index 81be78a64f1d..000000000000 --- a/core/lib/db_connection/src/healthcheck.rs +++ /dev/null @@ -1,58 +0,0 @@ -use serde::Serialize; -use zksync_health_check::{async_trait, CheckHealth, Health, HealthStatus}; - -use crate::{connection::DbMarker, connection_pool::ConnectionPool}; - -#[derive(Debug, Serialize)] -struct ConnectionPoolHealthDetails { - pool_size: u32, - max_size: u32, -} - -impl ConnectionPoolHealthDetails { - fn new(pool: &ConnectionPool) -> Self { - Self { - pool_size: pool.inner.size(), - max_size: pool.max_size(), - } - } -} - -// HealthCheck used to verify if we can connect to the database. -// This guarantees that the app can use it's main "communication" channel. -// Used in the /health endpoint -#[derive(Clone, Debug)] -pub struct ConnectionPoolHealthCheck { - connection_pool: ConnectionPool, -} - -impl ConnectionPoolHealthCheck { - pub fn new(connection_pool: ConnectionPool) -> ConnectionPoolHealthCheck { - Self { connection_pool } - } -} - -#[async_trait] -impl CheckHealth for ConnectionPoolHealthCheck { - fn name(&self) -> &'static str { - "connection_pool" - } - - async fn check_health(&self) -> Health { - // This check is rather feeble, plan to make reliable here: - // https://linear.app/matterlabs/issue/PLA-255/revamp-db-connection-health-check - match self.connection_pool.connection().await { - Ok(_) => { - let details = ConnectionPoolHealthDetails::new(&self.connection_pool); - Health::from(HealthStatus::Ready).with_details(details) - } - Err(err) => { - tracing::warn!("Failed acquiring DB connection for health check: {err:?}"); - let details = serde_json::json!({ - "error": format!("{err:?}"), - }); - Health::from(HealthStatus::NotReady).with_details(details) - } - } - } -} diff --git a/core/lib/db_connection/src/instrument.rs b/core/lib/db_connection/src/instrument.rs index e0728ce22b85..244329ca75bb 100644 --- a/core/lib/db_connection/src/instrument.rs +++ b/core/lib/db_connection/src/instrument.rs @@ -200,6 +200,21 @@ impl<'a> InstrumentedData<'a> { } } + fn observe_error(&self, err: &dyn fmt::Display) { + let InstrumentedData { + name, + location, + args, + .. + } = self; + tracing::warn!( + "Query {name}{args} called at {file}:{line} has resulted in error: {err}", + file = location.file(), + line = location.line() + ); + REQUEST_METRICS.request_error[name].inc(); + } + async fn fetch( self, connection_tags: Option<&ConnectionTags>, @@ -295,32 +310,40 @@ impl<'a> Instrumented<'a, ()> { } } - /// Wraps a provided argument validation error. + /// Wraps a provided argument validation error. 
+    /// will be returned as an error cause (e.g., it is logged as an error and observed using metrics).
+    #[must_use]
     pub fn arg_error<E>(&self, arg_name: &str, err: E) -> DalError
     where
         E: Into<anyhow::Error>,
     {
         let err: anyhow::Error = err.into();
         let err = err.context(format!("failed validating query argument `{arg_name}`"));
-        DalRequestError::new(
+        let err = DalRequestError::new(
             sqlx::Error::Decode(err.into()),
             self.data.name,
             self.data.location,
         )
-        .with_args(self.data.args.to_owned())
-        .into()
+        .with_args(self.data.args.to_owned());
+
+        self.data.observe_error(&err);
+        err.into()
     }
 
-    /// Wraps a provided application-level data constraint error.
+    /// Wraps a provided application-level data constraint error. It is assumed that the returned error
+    /// will be returned as an error cause (e.g., it is logged as an error and observed using metrics).
+    #[must_use]
     pub fn constraint_error(&self, err: anyhow::Error) -> DalError {
         let err = err.context("application-level data constraint violation");
-        DalRequestError::new(
+        let err = DalRequestError::new(
             sqlx::Error::Decode(err.into()),
             self.data.name,
             self.data.location,
         )
-        .with_args(self.data.args.to_owned())
-        .into()
+        .with_args(self.data.args.to_owned());
+
+        self.data.observe_error(&err);
+        err.into()
     }
 
     pub fn with<Q>(self, query: Q) -> Instrumented<'a, Q> {
@@ -475,7 +498,7 @@ mod tests {
     #[tokio::test]
     async fn instrumenting_erroneous_query() {
         let pool = ConnectionPool::<InternalMarker>::test_pool().await;
-        // Add `vlog::init()` here to debug this test
+        // Add `zksync_vlog::init()` here to debug this test
         let mut conn = pool.connection().await.unwrap();
 
         sqlx::query("WHAT")
@@ -491,7 +514,7 @@ mod tests {
     #[tokio::test]
     async fn instrumenting_slow_query() {
         let pool = ConnectionPool::<InternalMarker>::test_pool().await;
-        // Add `vlog::init()` here to debug this test
+        // Add `zksync_vlog::init()` here to debug this test
        let mut conn = pool.connection().await.unwrap();
 
         sqlx::query("SELECT pg_sleep(1.5)")
diff --git a/core/lib/db_connection/src/lib.rs b/core/lib/db_connection/src/lib.rs
index 649af477e636..908a310c72ba 100644
--- a/core/lib/db_connection/src/lib.rs
+++ b/core/lib/db_connection/src/lib.rs
@@ -3,7 +3,6 @@
 pub mod connection;
 pub mod connection_pool;
 pub mod error;
-pub mod healthcheck;
 pub mod instrument;
 pub mod metrics;
 #[macro_use]
diff --git a/core/lib/db_connection/src/utils.rs b/core/lib/db_connection/src/utils.rs
index 7c917845c7e3..80cf0a5cbb35 100644
--- a/core/lib/db_connection/src/utils.rs
+++ b/core/lib/db_connection/src/utils.rs
@@ -9,6 +9,10 @@ pub(crate) struct InternalMarker;
 
 impl DbMarker for InternalMarker {}
 
+const MICROSECONDS_IN_A_SECOND: i64 = 1_000_000;
+const MICROSECONDS_IN_A_MINUTE: i64 = MICROSECONDS_IN_A_SECOND * 60;
+const MICROSECONDS_IN_AN_HOUR: i64 = MICROSECONDS_IN_A_MINUTE * 60;
+
 pub fn duration_to_naive_time(duration: Duration) -> NaiveTime {
     let total_seconds = duration.as_secs() as u32;
     NaiveTime::from_hms_opt(
@@ -26,3 +30,16 @@ pub const fn pg_interval_from_duration(processing_timeout: Duration) -> PgInterval {
         microseconds: processing_timeout.as_micros() as i64,
     }
 }
+
+// Note: this conversion purposefully ignores the `.days` and `.months` fields of PgInterval.
+// The PgIntervals expected are below 24h (represented by `.microseconds`). If that's not the case,
+// the function will trim days and months. Use at your own risk.
+pub fn naive_time_from_pg_interval(pg_interval: PgInterval) -> NaiveTime {
+    NaiveTime::from_hms_micro_opt(
+        (pg_interval.microseconds / MICROSECONDS_IN_AN_HOUR) as u32,
+        ((pg_interval.microseconds / MICROSECONDS_IN_A_MINUTE) % 60) as u32,
+        ((pg_interval.microseconds / MICROSECONDS_IN_A_SECOND) % 60) as u32,
+        (pg_interval.microseconds as u32) % 1_000_000,
+    )
+    .expect("failed to convert PgInterval to NaiveTime")
+}
diff --git a/core/lib/default_da_clients/Cargo.toml b/core/lib/default_da_clients/Cargo.toml
new file mode 100644
index 000000000000..737d209aed31
--- /dev/null
+++ b/core/lib/default_da_clients/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "zksync_default_da_clients"
+description = "ZKsync DA client implementations"
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+license.workspace = true
+keywords.workspace = true
+categories.workspace = true
+
+[dependencies]
+serde = { workspace = true, features = ["derive"] }
+tracing.workspace = true
+async-trait.workspace = true
+anyhow.workspace = true
+flate2.workspace = true
+
+zksync_config.workspace = true
+zksync_types.workspace = true
+zksync_object_store.workspace = true
+zksync_da_client.workspace = true
+zksync_node_framework.workspace = true
+zksync_env_config.workspace = true
diff --git a/core/lib/default_da_clients/README.md b/core/lib/default_da_clients/README.md
new file mode 100644
index 000000000000..17ced715b268
--- /dev/null
+++ b/core/lib/default_da_clients/README.md
@@ -0,0 +1,11 @@
+# Default DA Clients
+
+This crate contains the default implementations of the Data Availability clients. Default clients are maintained within
+this repo because they are tightly coupled with the codebase, and would cause a circular dependency if they were to be
+moved to the [hyperchain-da](https://github.com/matter-labs/hyperchain-da) repository.
+
+Currently, the following Data Availability clients are implemented:
+
+- `NoDA client` that does not send or store any pubdata; it is needed to run the ZKsync network in the "no-DA" mode
+  utilizing the DA framework.
+- `Object Store client` that stores the pubdata in the Object Store (GCS).
diff --git a/core/lib/default_da_clients/src/lib.rs b/core/lib/default_da_clients/src/lib.rs
new file mode 100644
index 000000000000..3aa2a18cdcec
--- /dev/null
+++ b/core/lib/default_da_clients/src/lib.rs
@@ -0,0 +1,2 @@
+pub mod no_da;
+pub mod object_store;
diff --git a/core/lib/default_da_clients/src/no_da/client.rs b/core/lib/default_da_clients/src/no_da/client.rs
new file mode 100644
index 000000000000..2710c9ce9d9b
--- /dev/null
+++ b/core/lib/default_da_clients/src/no_da/client.rs
@@ -0,0 +1,28 @@
+use async_trait::async_trait;
+use zksync_da_client::{
+    types::{DAError, DispatchResponse, InclusionData},
+    DataAvailabilityClient,
+};
+
+/// A no-op implementation of the `DataAvailabilityClient` trait that doesn't store the pubdata.
+#[derive(Clone, Debug, Default)]
+pub struct NoDAClient;
+
+#[async_trait]
+impl DataAvailabilityClient for NoDAClient {
+    async fn dispatch_blob(&self, _: u32, _: Vec<u8>) -> Result<DispatchResponse, DAError> {
+        Ok(DispatchResponse::default())
+    }
+
+    async fn get_inclusion_data(&self, _: &str) -> Result<Option<InclusionData>, DAError> {
+        return Ok(Some(InclusionData::default()));
+    }
+
+    fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient> {
+        Box::new(self.clone())
+    }
+
+    fn blob_size_limit(&self) -> Option<usize> {
+        None
+    }
+}
diff --git a/core/lib/default_da_clients/src/no_da/mod.rs b/core/lib/default_da_clients/src/no_da/mod.rs
new file mode 100644
index 000000000000..814cf30c2cbd
--- /dev/null
+++ b/core/lib/default_da_clients/src/no_da/mod.rs
@@ -0,0 +1,2 @@
+pub mod client;
+pub mod wiring_layer;
diff --git a/core/lib/default_da_clients/src/no_da/wiring_layer.rs b/core/lib/default_da_clients/src/no_da/wiring_layer.rs
new file mode 100644
index 000000000000..71a2ee7ce582
--- /dev/null
+++ b/core/lib/default_da_clients/src/no_da/wiring_layer.rs
@@ -0,0 +1,36 @@
+use std::fmt::Debug;
+
+use zksync_da_client::DataAvailabilityClient;
+use zksync_node_framework::{
+    implementations::resources::da_client::DAClientResource,
+    wiring_layer::{WiringError, WiringLayer},
+    IntoContext,
+};
+
+use crate::no_da::client::NoDAClient;
+
+#[derive(Debug, Default)]
+pub struct NoDAClientWiringLayer;
+
+#[derive(Debug, IntoContext)]
+pub struct Output {
+    pub client: DAClientResource,
+}
+
+#[async_trait::async_trait]
+impl WiringLayer for NoDAClientWiringLayer {
+    type Input = ();
+    type Output = Output;
+
+    fn layer_name(&self) -> &'static str {
+        "no_da_layer"
+    }
+
+    async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
+        let client: Box<dyn DataAvailabilityClient> = Box::new(NoDAClient);
+
+        Ok(Output {
+            client: DAClientResource(client),
+        })
+    }
+}
diff --git a/core/lib/default_da_clients/src/object_store/client.rs b/core/lib/default_da_clients/src/object_store/client.rs
new file mode 100644
index 000000000000..fc17a842a099
--- /dev/null
+++ b/core/lib/default_da_clients/src/object_store/client.rs
@@ -0,0 +1,86 @@
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use zksync_config::ObjectStoreConfig;
+use zksync_da_client::{
+    types::{DAError, DispatchResponse, InclusionData},
+    DataAvailabilityClient,
+};
+use zksync_object_store::{ObjectStore, ObjectStoreFactory};
+use zksync_types::L1BatchNumber;
+
+use crate::object_store::types::StorablePubdata;
+
+/// An implementation of the `DataAvailabilityClient` trait that stores the pubdata in the GCS.
+#[derive(Clone, Debug)]
+pub struct ObjectStoreDAClient {
+    object_store: Arc<dyn ObjectStore>,
+}
+
+impl ObjectStoreDAClient {
+    pub async fn new(object_store_conf: ObjectStoreConfig) -> anyhow::Result<Self> {
+        Ok(ObjectStoreDAClient {
+            object_store: ObjectStoreFactory::new(object_store_conf)
+                .create_store()
+                .await?,
+        })
+    }
+}
+
+#[async_trait]
+impl DataAvailabilityClient for ObjectStoreDAClient {
+    async fn dispatch_blob(
+        &self,
+        batch_number: u32,
+        data: Vec<u8>,
+    ) -> Result<DispatchResponse, DAError> {
+        if let Err(err) = self
+            .object_store
+            .put(L1BatchNumber(batch_number), &StorablePubdata { data })
+            .await
+        {
+            return Err(DAError {
+                is_transient: err.is_transient(),
+                error: anyhow::Error::from(err),
+            });
+        }
+
+        Ok(DispatchResponse {
+            blob_id: batch_number.to_string(),
+        })
+    }
+
+    async fn get_inclusion_data(&self, key: &str) -> Result<Option<InclusionData>, DAError> {
+        let key_u32 = key.parse::<u32>().map_err(|err| DAError {
+            error: anyhow::Error::from(err).context(format!("Failed to parse blob key: {}", key)),
+            is_transient: false,
+        })?;
+
+        if let Err(err) = self
+            .object_store
+            .get::<StorablePubdata>(L1BatchNumber(key_u32))
+            .await
+        {
+            if let zksync_object_store::ObjectStoreError::KeyNotFound(_) = err {
+                return Ok(None);
+            }
+
+            return Err(DAError {
+                is_transient: err.is_transient(),
+                error: anyhow::Error::from(err),
+            });
+        }
+
+        // Using default here because we don't get any inclusion data from object store, thus
+        // there's nothing to check on L1.
+        return Ok(Some(InclusionData::default()));
+    }
+
+    fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient> {
+        Box::new(self.clone())
+    }
+
+    fn blob_size_limit(&self) -> Option<usize> {
+        None
+    }
+}
diff --git a/core/lib/default_da_clients/src/object_store/config.rs b/core/lib/default_da_clients/src/object_store/config.rs
new file mode 100644
index 000000000000..285c39827c79
--- /dev/null
+++ b/core/lib/default_da_clients/src/object_store/config.rs
@@ -0,0 +1,12 @@
+use zksync_config::ObjectStoreConfig;
+use zksync_env_config::envy_load;
+
+#[derive(Debug)]
+pub struct DAObjectStoreConfig(pub ObjectStoreConfig);
+
+impl DAObjectStoreConfig {
+    pub fn from_env() -> anyhow::Result<Self> {
+        let config = envy_load("object_store", "DA_CLIENT_OBJECT_STORE_")?;
+        Ok(Self(config))
+    }
+}
diff --git a/core/lib/default_da_clients/src/object_store/mod.rs b/core/lib/default_da_clients/src/object_store/mod.rs
new file mode 100644
index 000000000000..1600941b0572
--- /dev/null
+++ b/core/lib/default_da_clients/src/object_store/mod.rs
@@ -0,0 +1,4 @@
+pub mod client;
+pub mod config;
+mod types;
+pub mod wiring_layer;
diff --git a/core/lib/default_da_clients/src/object_store/types.rs b/core/lib/default_da_clients/src/object_store/types.rs
new file mode 100644
index 000000000000..b8ec9303e71e
--- /dev/null
+++ b/core/lib/default_da_clients/src/object_store/types.rs
@@ -0,0 +1,38 @@
+use std::io::{Read, Write};
+
+use flate2::{read::GzDecoder, write::GzEncoder, Compression};
+use zksync_object_store::{Bucket, StoredObject, _reexports::BoxedError};
+use zksync_types::L1BatchNumber;
+
+/// Used as a wrapper for the pubdata to be stored in the GCS.
+#[derive(Debug)]
+pub struct StorablePubdata {
+    pub data: Vec<u8>,
+}
+
+impl StoredObject for StorablePubdata {
+    const BUCKET: Bucket = Bucket::DataAvailability;
+    type Key<'a> = L1BatchNumber;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!("l1_batch_{key}_pubdata.gzip")
+    }
+
+    fn serialize(&self) -> Result<Vec<u8>, BoxedError> {
+        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
+        encoder.write_all(&self.data[..])?;
+        encoder.finish().map_err(From::from)
+    }
+
+    fn deserialize(bytes: Vec<u8>) -> Result<Self, BoxedError> {
+        let mut decoder = GzDecoder::new(&bytes[..]);
+        let mut decompressed_bytes = Vec::new();
+        decoder
+            .read_to_end(&mut decompressed_bytes)
+            .map_err(BoxedError::from)?;
+
+        Ok(Self {
+            data: decompressed_bytes,
+        })
+    }
+}
diff --git a/core/lib/default_da_clients/src/object_store/wiring_layer.rs b/core/lib/default_da_clients/src/object_store/wiring_layer.rs
new file mode 100644
index 000000000000..6fc84fb707b7
--- /dev/null
+++ b/core/lib/default_da_clients/src/object_store/wiring_layer.rs
@@ -0,0 +1,44 @@
+use zksync_config::ObjectStoreConfig;
+use zksync_da_client::DataAvailabilityClient;
+use zksync_node_framework::{
+    implementations::resources::da_client::DAClientResource,
+    wiring_layer::{WiringError, WiringLayer},
+    IntoContext,
+};
+
+use crate::object_store::client::ObjectStoreDAClient;
+
+#[derive(Debug)]
+pub struct ObjectStorageClientWiringLayer {
+    config: ObjectStoreConfig,
+}
+
+impl ObjectStorageClientWiringLayer {
+    pub fn new(config: ObjectStoreConfig) -> Self {
+        Self { config }
+    }
+}
+
+#[derive(Debug, IntoContext)]
+pub struct Output {
+    pub client: DAClientResource,
+}
+
+#[async_trait::async_trait]
+impl WiringLayer for ObjectStorageClientWiringLayer {
+    type Input = ();
+    type Output = Output;
+
+    fn layer_name(&self) -> &'static str {
+        "object_store_da_layer"
+    }
+
+    async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
+        let client: Box<dyn DataAvailabilityClient> =
+            Box::new(ObjectStoreDAClient::new(self.config).await?);
+
+        Ok(Output {
+            client: DAClientResource(client),
+        })
+    }
+}
diff --git a/core/lib/env_config/Cargo.toml b/core/lib/env_config/Cargo.toml
index c86621584010..31ffb8223bd1 100644
--- a/core/lib/env_config/Cargo.toml
+++ b/core/lib/env_config/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_env_config"
-version = "0.1.0"
+description = "ZKsync env deserialization for configs"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs
index 6f1948241c9e..64d8696f50bb 100644
--- a/core/lib/env_config/src/api.rs
+++ b/core/lib/env_config/src/api.rs
@@ -98,10 +98,12 @@ mod tests {
                 addr("0x0000000000000000000000000000000000000001"),
                 addr("0x0000000000000000000000000000000000000002"),
             ],
+            api_namespaces: Some(vec!["debug".to_string()]),
+            extended_api_tracing: true,
         },
         prometheus: PrometheusConfig {
             listener_port: 3312,
-            pushgateway_url: "http://127.0.0.1:9091".into(),
+            pushgateway_url: Some("http://127.0.0.1:9091".into()),
             push_interval_ms: Some(100),
         },
         healthcheck: HealthCheckConfig {
@@ -129,6 +131,8 @@ mod tests {
             API_WEB3_JSON_RPC_MAX_NONCE_AHEAD=5
             API_WEB3_JSON_RPC_GAS_PRICE_SCALE_FACTOR=1.2
             API_WEB3_JSON_RPC_REQUEST_TIMEOUT=10
+            API_WEB3_JSON_RPC_API_NAMESPACES=debug
+            API_WEB3_JSON_RPC_EXTENDED_API_TRACING=true
             API_WEB3_JSON_RPC_ACCOUNT_PKS="0x0000000000000000000000000000000000000000000000000000000000000001,0x0000000000000000000000000000000000000000000000000000000000000002"
            API_WEB3_JSON_RPC_WHITELISTED_TOKENS_FOR_AA="0x0000000000000000000000000000000000000001,0x0000000000000000000000000000000000000002"
             API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.0
diff --git a/core/lib/env_config/src/base_token_adjuster.rs b/core/lib/env_config/src/base_token_adjuster.rs
new file mode 100644
index 000000000000..5e4ef39671ca
--- /dev/null
+++ b/core/lib/env_config/src/base_token_adjuster.rs
@@ -0,0 +1,9 @@
+use zksync_config::configs::BaseTokenAdjusterConfig;
+
+use crate::{envy_load, FromEnv};
+
+impl FromEnv for BaseTokenAdjusterConfig {
+    fn from_env() -> anyhow::Result<Self> {
+        envy_load("base_token_adjuster", "BASE_TOKEN_ADJUSTER_")
+    }
+}
diff --git a/core/lib/env_config/src/chain.rs b/core/lib/env_config/src/chain.rs
index 5aaae9216736..441fcc4159cb 100644
--- a/core/lib/env_config/src/chain.rs
+++ b/core/lib/env_config/src/chain.rs
@@ -104,6 +104,7 @@ mod tests {
             )),
             l1_batch_commit_data_generator_mode,
             max_circuits_per_batch: 24100,
+            protective_reads_persistence_enabled: true,
         }
     }
 
diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs
index ae5eb6f30c92..3365f56add77 100644
--- a/core/lib/env_config/src/contracts.rs
+++ b/core/lib/env_config/src/contracts.rs
@@ -71,6 +71,7 @@ mod tests {
                 transparent_proxy_admin_addr: addr("0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347e5"),
             }),
             base_token_addr: Some(SHARED_BRIDGE_ETHER_TOKEN_ADDRESS),
+            chain_admin_addr: Some(addr("0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff")),
         }
     }
 
@@ -95,6 +96,7 @@ CONTRACTS_BRIDGEHUB_PROXY_ADDR="0x35ea7f92f4c5f433efe15284e99c040110cf6297"
 CONTRACTS_STATE_TRANSITION_PROXY_ADDR="0xd90f1c081c6117241624e97cb6147257c3cb2097"
 CONTRACTS_TRANSPARENT_PROXY_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347e5"
 CONTRACTS_BASE_TOKEN_ADDR="0x0000000000000000000000000000000000000001"
+CONTRACTS_CHAIN_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff"
         "#;
         lock.set_env(config);
 
diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs
new file mode 100644
index 000000000000..194e4185b286
--- /dev/null
+++ b/core/lib/env_config/src/da_dispatcher.rs
@@ -0,0 +1,44 @@
+use zksync_config::DADispatcherConfig;
+
+use crate::{envy_load, FromEnv};
+
+impl FromEnv for DADispatcherConfig {
+    fn from_env() -> anyhow::Result<Self> {
+        envy_load("da_dispatcher", "DA_DISPATCHER_")
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use zksync_config::configs::da_dispatcher::DADispatcherConfig;
+
+    use super::*;
+    use crate::test_utils::EnvMutex;
+
+    static MUTEX: EnvMutex = EnvMutex::new();
+
+    fn expected_da_layer_config(
+        interval: u32,
+        rows_limit: u32,
+        max_retries: u16,
+    ) -> DADispatcherConfig {
+        DADispatcherConfig {
+            polling_interval_ms: Some(interval),
+            max_rows_to_dispatch: Some(rows_limit),
+            max_retries: Some(max_retries),
+        }
+    }
+
+    #[test]
+    fn from_env_da_dispatcher() {
+        let mut lock = MUTEX.lock();
+        let config = r#"
+            DA_DISPATCHER_POLLING_INTERVAL_MS=5000
+            DA_DISPATCHER_MAX_ROWS_TO_DISPATCH=60
+            DA_DISPATCHER_MAX_RETRIES=7
+        "#;
+        lock.set_env(config);
+        let actual = DADispatcherConfig::from_env().unwrap();
+        assert_eq!(actual, expected_da_layer_config(5000, 60, 7));
+    }
+}
diff --git a/core/lib/env_config/src/external_price_api_client.rs b/core/lib/env_config/src/external_price_api_client.rs
new file mode 100644
index 000000000000..7ec3782dc6b4
--- /dev/null
+++ b/core/lib/env_config/src/external_price_api_client.rs
@@ -0,0 +1,48 @@
+use zksync_config::configs::ExternalPriceApiClientConfig;
+
+use crate::{envy_load, FromEnv};
+
+impl FromEnv for ExternalPriceApiClientConfig {
+    fn from_env() -> anyhow::Result<Self> {
+        envy_load("external_price_api_client", "EXTERNAL_PRICE_API_CLIENT_")
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use zksync_config::configs::external_price_api_client::{
+        ExternalPriceApiClientConfig, DEFAULT_TIMEOUT_MS,
+    };
+
+    use super::*;
+    use crate::test_utils::EnvMutex;
+
+    static MUTEX: EnvMutex = EnvMutex::new();
+
+    fn expected_external_price_api_client_config() -> ExternalPriceApiClientConfig {
+        ExternalPriceApiClientConfig {
+            source: "no-op".to_string(),
+            base_url: Some("https://pro-api.coingecko.com".to_string()),
+            api_key: Some("qwerty12345".to_string()),
+            client_timeout_ms: DEFAULT_TIMEOUT_MS,
+            forced_numerator: Some(100),
+            forced_denominator: Some(1),
+        }
+    }
+
+    #[test]
+    fn from_env_external_price_api_client() {
+        let mut lock = MUTEX.lock();
+        let config = r#"
+            EXTERNAL_PRICE_API_CLIENT_SOURCE=no-op
+            EXTERNAL_PRICE_API_CLIENT_BASE_URL=https://pro-api.coingecko.com
+            EXTERNAL_PRICE_API_CLIENT_API_KEY=qwerty12345
+            EXTERNAL_PRICE_API_CLIENT_FORCED_NUMERATOR=100
+            EXTERNAL_PRICE_API_CLIENT_FORCED_DENOMINATOR=1
+        "#;
+        lock.set_env(config);
+
+        let actual = ExternalPriceApiClientConfig::from_env().unwrap();
+        assert_eq!(actual, expected_external_price_api_client_config());
+    }
+}
diff --git a/core/lib/env_config/src/fri_prover.rs b/core/lib/env_config/src/fri_prover.rs
index 96069d6514ea..bdcf5291ee05 100644
--- a/core/lib/env_config/src/fri_prover.rs
+++ b/core/lib/env_config/src/fri_prover.rs
@@ -18,7 +18,10 @@ impl FromEnv for FriProverConfig {
 #[cfg(test)]
 mod tests {
     use zksync_config::{
-        configs::{fri_prover::SetupLoadMode, object_store::ObjectStoreMode},
+        configs::{
+            fri_prover::{CloudType, SetupLoadMode},
+            object_store::ObjectStoreMode,
+        },
         ObjectStoreConfig,
     };
 
@@ -57,6 +60,7 @@ mod tests {
                 local_mirror_path: None,
             }),
             availability_check_interval_in_secs: Some(1_800),
+            cloud_type: CloudType::GCP,
         }
     }
 
diff --git a/core/lib/env_config/src/fri_witness_generator.rs b/core/lib/env_config/src/fri_witness_generator.rs
index 9780e6aec682..5853a0178308 100644
--- a/core/lib/env_config/src/fri_witness_generator.rs
+++ b/core/lib/env_config/src/fri_witness_generator.rs
@@ -26,6 +26,7 @@ mod tests {
             max_attempts: 4,
             last_l1_batch_to_process: None,
             shall_save_to_public_bucket: true,
+            prometheus_listener_port: Some(3333u16),
         }
     }
 
@@ -41,6 +42,7 @@ mod tests {
             FRI_WITNESS_SCHEDULER_GENERATION_TIMEOUT_IN_SECS=900
             FRI_WITNESS_MAX_ATTEMPTS=4
             FRI_WITNESS_SHALL_SAVE_TO_PUBLIC_BUCKET=true
+            FRI_WITNESS_PROMETHEUS_LISTENER_PORT=3333
         "#;
         lock.set_env(config);
 
diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs
index 9218467fdaba..789f6f8be2fd 100644
--- a/core/lib/env_config/src/lib.rs
+++ b/core/lib/env_config/src/lib.rs
@@ -21,6 +21,9 @@ mod proof_data_handler;
 mod snapshots_creator;
 mod utils;
 
+mod base_token_adjuster;
+mod da_dispatcher;
+mod external_price_api_client;
 mod genesis;
 #[cfg(test)]
 mod test_utils;
diff --git a/core/lib/env_config/src/snapshots_creator.rs b/core/lib/env_config/src/snapshots_creator.rs
index 6ed80e3780ce..80e1f5ec0b00 100644
--- a/core/lib/env_config/src/snapshots_creator.rs
+++ b/core/lib/env_config/src/snapshots_creator.rs
@@ -1,9 +1,13 @@
 use zksync_config::SnapshotsCreatorConfig;
 
-use crate::{envy_load, FromEnv};
+use crate::{envy_load, object_store::SnapshotsObjectStoreConfig, FromEnv};
 
 impl FromEnv for SnapshotsCreatorConfig {
     fn from_env() -> anyhow::Result<Self> {
-        envy_load("snapshots_creator", "SNAPSHOTS_CREATOR_")
+        let mut snapshot_creator: SnapshotsCreatorConfig =
+            envy_load("snapshots_creator", "SNAPSHOTS_CREATOR_")?;
+
+        snapshot_creator.object_store = SnapshotsObjectStoreConfig::from_env().map(|a| a.0).ok();
+        Ok(snapshot_creator)
     }
 }
diff --git a/core/lib/env_config/src/vm_runner.rs b/core/lib/env_config/src/vm_runner.rs
index 8a99ea2dc8e2..9973d760a236 100644
--- a/core/lib/env_config/src/vm_runner.rs
+++ b/core/lib/env_config/src/vm_runner.rs
@@ -1,4 +1,4 @@
-use zksync_config::configs::ProtectiveReadsWriterConfig;
+use zksync_config::configs::{BasicWitnessInputProducerConfig, ProtectiveReadsWriterConfig};
 
 use crate::{envy_load, FromEnv};
 
@@ -7,3 +7,9 @@ impl FromEnv for ProtectiveReadsWriterConfig {
         envy_load("vm_runner.protective_reads", "VM_RUNNER_PROTECTIVE_READS_")
     }
 }
+
+impl FromEnv for BasicWitnessInputProducerConfig {
+    fn from_env() -> anyhow::Result<Self> {
+        envy_load("vm_runner.bwip", "VM_RUNNER_BWIP_")
+    }
+}
diff --git a/core/lib/eth_client/Cargo.toml b/core/lib/eth_client/Cargo.toml
index 72d92f2ce48f..4daa5a729ff6 100644
--- a/core/lib/eth_client/Cargo.toml
+++ b/core/lib/eth_client/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_eth_client"
-version = "0.1.0"
+description = "ZKsync Ethereum client implementations"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs
index 33d9838dc735..3abea2c7e420 100644
--- a/core/lib/eth_client/src/clients/http/query.rs
+++ b/core/lib/eth_client/src/clients/http/query.rs
@@ -8,7 +8,7 @@ use zksync_web3_decl::error::{ClientRpcContext, EnrichedClientError, EnrichedClientResult};
 use super::{decl::L1EthNamespaceClient, Method, COUNTERS, LATENCIES};
 use crate::{
     types::{ExecutedTxStatus, FailureInfo},
-    EthInterface, RawTransactionBytes,
+    BaseFees, EthInterface, RawTransactionBytes,
 };
 
 #[async_trait]
@@ -78,7 +78,15 @@ where
         &self,
         upto_block: usize,
         block_count: usize,
-    ) -> EnrichedClientResult<Vec<u64>> {
+    ) -> EnrichedClientResult<Vec<BaseFees>> {
+        // Non-panicking conversion to u64.
+        fn cast_to_u64(value: U256, tag: &str) -> EnrichedClientResult<u64> {
+            u64::try_from(value).map_err(|_| {
+                let err = ClientError::Custom(format!("{tag} value does not fit in u64"));
+                EnrichedClientError::new(err, "cast_to_u64").with_arg("value", &value)
+            })
+        }
+
         const MAX_REQUEST_CHUNK: usize = 1024;
 
         COUNTERS.call[&(Method::BaseFeeHistory, self.component())].inc();
@@ -103,11 +111,34 @@ where
                 .with_arg("chunk_size", &chunk_size)
                 .with_arg("block", &chunk_end)
                 .await?;
-            history.extend(fee_history.base_fee_per_gas);
+
+            // Check that the lengths are the same.
+            // Per specification, the values should always be provided, and must be 0 for blocks
+            // prior to EIP-4844.
+            // https://ethereum.github.io/execution-apis/api-documentation/
+ // https://ethereum.github.io/execution-apis/api-documentation/ + if fee_history.base_fee_per_gas.len() != fee_history.base_fee_per_blob_gas.len() { + tracing::error!( + "base_fee_per_gas and base_fee_per_blob_gas have different lengths: {} and {}", + fee_history.base_fee_per_gas.len(), + fee_history.base_fee_per_blob_gas.len() + ); + } + + for (base, blob) in fee_history + .base_fee_per_gas + .into_iter() + .zip(fee_history.base_fee_per_blob_gas) + { + let fees = BaseFees { + base_fee_per_gas: cast_to_u64(base, "base_fee_per_gas")?, + base_fee_per_blob_gas: blob, + }; + history.push(fees) + } } latency.observe(); - Ok(history.into_iter().map(|fee| fee.as_u64()).collect()) + Ok(history) } async fn get_pending_block_base_fee_per_gas(&self) -> EnrichedClientResult { @@ -322,3 +353,18 @@ where Ok(block) } } + +#[cfg(test)] +mod tests { + use zksync_web3_decl::client::{Client, L1}; + + /// This test makes sure that we can instantiate a client with an HTTPS provider. + /// The need for this test was caused by feature collisions for `rustls` in our dependency graph, + /// which caused this test to panic. + #[tokio::test] + async fn test_https_provider() { + let url = "https://rpc.flashbots.net/"; + let _client = Client::::http(url.parse().unwrap()).unwrap().build(); + // No need to do anything; if the client was created and we didn't panic, we're good. + } +} diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index 03162c2cfeb4..9fbc5ceb4b2e 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -14,7 +14,7 @@ use zksync_web3_decl::client::{DynClient, MockClient, L1}; use crate::{ types::{ContractCallError, SignedCallResult, SigningError}, - BoundEthInterface, Options, RawTransactionBytes, + BaseFees, BoundEthInterface, Options, RawTransactionBytes, }; #[derive(Debug, Clone)] @@ -212,8 +212,7 @@ type CallHandler = pub struct MockEthereumBuilder { max_fee_per_gas: U256, max_priority_fee_per_gas: U256, - base_fee_history: Vec, - excess_blob_gas_history: Vec, + base_fee_history: Vec, /// If true, the mock will not check the ordering nonces of the transactions. /// This is useful for testing the cases when the transactions are executed out of order. non_ordering_confirmations: bool, @@ -228,7 +227,6 @@ impl fmt::Debug for MockEthereumBuilder { .field("max_fee_per_gas", &self.max_fee_per_gas) .field("max_priority_fee_per_gas", &self.max_priority_fee_per_gas) .field("base_fee_history", &self.base_fee_history) - .field("excess_blob_gas_history", &self.excess_blob_gas_history) .field( "non_ordering_confirmations", &self.non_ordering_confirmations, @@ -244,7 +242,6 @@ impl Default for MockEthereumBuilder { max_fee_per_gas: 100.into(), max_priority_fee_per_gas: 10.into(), base_fee_history: vec![], - excess_blob_gas_history: vec![], non_ordering_confirmations: false, inner: Arc::default(), call_handler: Box::new(|call, block_id| { @@ -256,21 +253,13 @@ impl Default for MockEthereumBuilder { impl MockEthereumBuilder { /// Sets fee history for each block in the mocked Ethereum network, starting from the 0th block. - pub fn with_fee_history(self, history: Vec) -> Self { + pub fn with_fee_history(self, history: Vec) -> Self { Self { base_fee_history: history, ..self } } - /// Sets the excess blob gas history for each block in the mocked Ethereum network, starting from the 0th block. 
-    pub fn with_excess_blob_gas_history(self, history: Vec<u64>) -> Self {
-        Self {
-            excess_blob_gas_history: history,
-            ..self
-        }
-    }
-
     pub fn with_non_ordering_confirmation(self, non_ordering_confirmations: bool) -> Self {
         Self {
             non_ordering_confirmations,
@@ -306,19 +295,16 @@ impl MockEthereumBuilder {
     }
 
     fn get_block_by_number(
-        base_fee_history: &[u64],
-        excess_blob_gas_history: &[u64],
+        fee_history: &[BaseFees],
         block: web3::BlockNumber,
     ) -> Option<web3::Block<H256>> {
         let web3::BlockNumber::Number(number) = block else {
             panic!("Non-numeric block requested");
         };
-        let excess_blob_gas = excess_blob_gas_history
-            .get(number.as_usize())
-            .map(|excess_blob_gas| (*excess_blob_gas).into());
-        let base_fee_per_gas = base_fee_history
+        let excess_blob_gas = Some(0.into()); // Not relevant for tests.
+        let base_fee_per_gas = fee_history
             .get(number.as_usize())
-            .map(|base_fee| (*base_fee).into());
+            .map(|fees| fees.base_fee_per_gas.into());
 
         Some(web3::Block {
             number: Some(number),
@@ -341,18 +327,12 @@ impl MockEthereumBuilder {
             move || Ok(U64::from(inner.read().unwrap().block_number))
         })
         .method("eth_getBlockByNumber", {
-            let base_fee_history = self.base_fee_history;
-            let excess_blob_gas_history = self.excess_blob_gas_history;
             move |number, full_transactions: bool| {
                 assert!(
                     !full_transactions,
                     "getting blocks with transactions is not mocked"
                 );
-                Ok(Self::get_block_by_number(
-                    &base_fee_history,
-                    &excess_blob_gas_history,
-                    number,
-                ))
+                Ok(Self::get_block_by_number(&self.base_fee_history, number))
             }
         })
         .method("eth_getTransactionCount", {
@@ -374,10 +354,14 @@ impl MockEthereumBuilder {
                     oldest_block: start_block.into(),
                     base_fee_per_gas: base_fee_history[start_block..=from_block]
                         .iter()
-                        .copied()
-                        .map(U256::from)
+                        .map(|fee| U256::from(fee.base_fee_per_gas))
                         .collect(),
-                    gas_used_ratio: vec![], // not used
+                    base_fee_per_blob_gas: base_fee_history[start_block..=from_block]
+                        .iter()
+                        .map(|fee| fee.base_fee_per_blob_gas)
+                        .collect(),
+                    gas_used_ratio: vec![],      // not used
+                    blob_gas_used_ratio: vec![], // not used
                     reward: None,
                 })
             },
@@ -591,10 +575,23 @@ mod tests {
     use super::*;
     use crate::{CallFunctionArgs, EthInterface};
 
+    fn base_fees(block: u64, blob: u64) -> BaseFees {
+        BaseFees {
+            base_fee_per_gas: block,
+            base_fee_per_blob_gas: U256::from(blob),
+        }
+    }
+
     #[tokio::test]
     async fn managing_block_number() {
         let mock = MockEthereum::builder()
-            .with_fee_history(vec![0, 1, 2, 3, 4])
+            .with_fee_history(vec![
+                base_fees(0, 4),
+                base_fees(1, 3),
+                base_fees(2, 2),
+                base_fees(3, 1),
+                base_fees(4, 0),
+            ])
             .build();
         let block_number = mock.client.block_number().await.unwrap();
         assert_eq!(block_number, 0.into());
@@ -625,17 +622,24 @@ mod tests {
 
     #[tokio::test]
     async fn managing_fee_history() {
+        let initial_fee_history = vec![
+            base_fees(1, 4),
+            base_fees(2, 3),
+            base_fees(3, 2),
+            base_fees(4, 1),
+            base_fees(5, 0),
+        ];
         let client = MockEthereum::builder()
-            .with_fee_history(vec![1, 2, 3, 4, 5])
+            .with_fee_history(initial_fee_history.clone())
            .build();
         client.advance_block_number(4);
 
         let fee_history = client.as_ref().base_fee_history(4, 4).await.unwrap();
-        assert_eq!(fee_history, [2, 3, 4, 5]);
+        assert_eq!(fee_history, &initial_fee_history[1..=4]);
         let fee_history = client.as_ref().base_fee_history(2, 2).await.unwrap();
-        assert_eq!(fee_history, [2, 3]);
+        assert_eq!(fee_history, &initial_fee_history[1..=2]);
         let fee_history = client.as_ref().base_fee_history(3, 2).await.unwrap();
-        assert_eq!(fee_history, [3, 4]);
+        assert_eq!(fee_history, &initial_fee_history[2..=3]);
     }
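Editor's aside: the mock now returns structured `BaseFees` entries instead of bare `u64` values. As a rough illustration of how downstream fee logic can consume the output of `base_fee_history`, here is a minimal sketch; the helper name and the 25% padding policy are invented for illustration and are not part of this PR (only the `BaseFees` struct, introduced in `core/lib/eth_client/src/lib.rs` just below, is real):

```rust
use zksync_eth_client::BaseFees;
use zksync_types::U256;

/// Hypothetical helper (not part of this diff): pads the maximum observed
/// base fee by 25% as a crude guard against short-term fee growth.
fn suggested_max_fee_per_gas(history: &[BaseFees]) -> u64 {
    let max_base_fee = history
        .iter()
        .map(|fees| fees.base_fee_per_gas)
        .max()
        .unwrap_or(0);
    max_base_fee + max_base_fee / 4
}

fn main() {
    let history = vec![
        BaseFees {
            base_fee_per_gas: 100,
            base_fee_per_blob_gas: U256::from(1),
        },
        BaseFees {
            base_fee_per_gas: 120,
            base_fee_per_blob_gas: U256::from(1),
        },
    ];
    // 120 + 120 / 4 = 150
    assert_eq!(suggested_max_fee_per_gas(&history), 150);
}
```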
    #[tokio::test]
diff --git a/core/lib/eth_client/src/lib.rs b/core/lib/eth_client/src/lib.rs
index 6e24047dd48c..b6ac3a89b54f 100644
--- a/core/lib/eth_client/src/lib.rs
+++ b/core/lib/eth_client/src/lib.rs
@@ -65,6 +65,13 @@ impl Options {
     }
 }
 
+/// Information about the base fees provided by the L1 client.
+#[derive(Debug, Clone, PartialEq, Eq, Default)]
+pub struct BaseFees {
+    pub base_fee_per_gas: u64,
+    pub base_fee_per_blob_gas: U256,
+}
+
 /// Common Web3 interface, as seen by the core applications.
 /// Encapsulates the raw Web3 interaction, providing a high-level interface. Acts as an extension
 /// trait implemented for L1 / Ethereum [clients](zksync_web3_decl::client::Client).
@@ -96,7 +103,7 @@ pub trait EthInterface: Sync + Send {
         &self,
         from_block: usize,
         block_count: usize,
-    ) -> EnrichedClientResult<Vec<u64>>;
+    ) -> EnrichedClientResult<Vec<BaseFees>>;
 
     /// Returns the `base_fee_per_gas` value for the currently pending L1 block.
     async fn get_pending_block_base_fee_per_gas(&self) -> EnrichedClientResult<U256>;
diff --git a/core/lib/eth_signer/Cargo.toml b/core/lib/eth_signer/Cargo.toml
index 866a0c158ed6..f760134e09bb 100644
--- a/core/lib/eth_signer/Cargo.toml
+++ b/core/lib/eth_signer/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_eth_signer"
-version = "0.1.0"
+description = "ZKsync Ethereum signer"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
diff --git a/core/lib/external_price_api/Cargo.toml b/core/lib/external_price_api/Cargo.toml
new file mode 100644
index 000000000000..9539aa3fdc3c
--- /dev/null
+++ b/core/lib/external_price_api/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "zksync_external_price_api"
+description = "ZKsync clients for fetching token prices"
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+license.workspace = true
+keywords.workspace = true
+categories.workspace = true
+
+[dependencies]
+async-trait.workspace = true
+anyhow.workspace = true
+url.workspace = true
+bigdecimal.workspace = true
+chrono.workspace = true
+serde.workspace = true
+reqwest = { workspace = true, features = ["json"] }
+fraction.workspace = true
+rand.workspace = true
+
+zksync_config.workspace = true
+zksync_types.workspace = true
+tokio.workspace = true
diff --git a/core/lib/external_price_api/README.md b/core/lib/external_price_api/README.md
new file mode 100644
index 000000000000..d1604bbae7e7
--- /dev/null
+++ b/core/lib/external_price_api/README.md
@@ -0,0 +1,7 @@
+# Price API Client
+
+This crate provides a simple trait to be implemented by clients interacting with external price APIs to fetch the
+ETH<->BaseToken ratio.
+
+All clients should be implemented here and used by the node framework layer, which will be agnostic to the number of
+clients available.
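Editor's aside: to make the trait surface described in the README concrete, a client only needs to implement `fetch_ratio`. A minimal sketch follows; the `FixedRatioClient` struct and its 2:1 ratio are invented for illustration, while the `PriceAPIClient` trait and the `BaseTokenAPIRatio` field set match their definitions and uses elsewhere in this diff (`src/lib.rs` and `src/forced_price_client.rs` below):

```rust
use std::num::NonZeroU64;

use async_trait::async_trait;
use zksync_external_price_api::PriceAPIClient;
use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address};

/// Hypothetical client that always reports a 2:1 BaseToken<->ETH ratio.
#[derive(Debug)]
struct FixedRatioClient;

#[async_trait]
impl PriceAPIClient for FixedRatioClient {
    async fn fetch_ratio(&self, _token_address: Address) -> anyhow::Result<BaseTokenAPIRatio> {
        Ok(BaseTokenAPIRatio {
            numerator: NonZeroU64::new(2).unwrap(),
            denominator: NonZeroU64::new(1).unwrap(),
            ratio_timestamp: chrono::Utc::now(),
        })
    }
}
```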
diff --git a/core/lib/external_price_api/src/coingecko_api.rs b/core/lib/external_price_api/src/coingecko_api.rs
new file mode 100644
index 000000000000..8fa7514b3684
--- /dev/null
+++ b/core/lib/external_price_api/src/coingecko_api.rs
@@ -0,0 +1,112 @@
+use std::collections::HashMap;
+
+use async_trait::async_trait;
+use chrono::Utc;
+use reqwest;
+use serde::{Deserialize, Serialize};
+use url::Url;
+use zksync_config::configs::ExternalPriceApiClientConfig;
+use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address};
+
+use crate::{address_to_string, utils::get_fraction, PriceAPIClient};
+
+#[derive(Debug)]
+pub struct CoinGeckoPriceAPIClient {
+    base_url: Url,
+    client: reqwest::Client,
+}
+
+const DEFAULT_COINGECKO_API_URL: &str = "https://pro-api.coingecko.com";
+const COINGECKO_AUTH_HEADER: &str = "x-cg-pro-api-key";
+const ETH_ID: &str = "eth";
+
+impl CoinGeckoPriceAPIClient {
+    pub fn new(config: ExternalPriceApiClientConfig) -> Self {
+        let client = if let Some(api_key) = &config.api_key {
+            let mut headers = reqwest::header::HeaderMap::new();
+            headers.insert(
+                reqwest::header::HeaderName::from_static(COINGECKO_AUTH_HEADER),
+                reqwest::header::HeaderValue::from_str(api_key)
+                    .expect("Failed to create header value"),
+            );
+
+            reqwest::Client::builder()
+                .default_headers(headers)
+                .timeout(config.client_timeout())
+                .build()
+                .expect("Failed to build reqwest client")
+        } else {
+            reqwest::Client::new()
+        };
+
+        let base_url = config
+            .base_url
+            .unwrap_or(DEFAULT_COINGECKO_API_URL.to_string());
+
+        Self {
+            base_url: Url::parse(&base_url).expect("Failed to parse CoinGecko URL"),
+            client,
+        }
+    }
+
+    async fn get_token_price_by_address(&self, address: Address) -> anyhow::Result<f64> {
+        let address_str = address_to_string(&address);
+        let price_url = self
+            .base_url
+            .join(
+                format!(
+                    "/api/v3/simple/token_price/ethereum?contract_addresses={}&vs_currencies={}",
+                    address_str, ETH_ID
+                )
+                .as_str(),
+            )
+            .expect("failed to join URL path");
+
+        let response = self.client.get(price_url).send().await?;
+        if !response.status().is_success() {
+            return Err(anyhow::anyhow!(
+                "Http error while fetching token price. Status: {}, token_addr: {}, msg: {}",
+                response.status(),
+                address_str,
+                response.text().await.unwrap_or(String::new())
+            ));
+        }
+
+        let cg_response = response.json::<CoinGeckoPriceResponse>().await?;
+        match cg_response.get_price(&address_str, &ETH_ID.to_string()) {
+            Some(&price) => Ok(price),
+            None => Err(anyhow::anyhow!(
+                "Price not found for token: {}",
+                address_str
+            )),
+        }
+    }
+}
+
+#[async_trait]
+impl PriceAPIClient for CoinGeckoPriceAPIClient {
+    async fn fetch_ratio(&self, token_address: Address) -> anyhow::Result<BaseTokenAPIRatio> {
+        let base_token_in_eth = self.get_token_price_by_address(token_address).await?;
+        let (numerator, denominator) = get_fraction(base_token_in_eth);
+
+        return Ok(BaseTokenAPIRatio {
+            numerator,
+            denominator,
+            ratio_timestamp: Utc::now(),
+        });
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+struct CoinGeckoPriceResponse {
+    #[serde(flatten)]
+    pub(crate) prices: HashMap<String, HashMap<String, f64>>,
+}
+
+impl CoinGeckoPriceResponse {
+    fn get_price(&self, address: &String, currency: &String) -> Option<&f64> {
+        self.prices
+            .get(address)
+            .and_then(|price| price.get(currency))
+    }
+}
diff --git a/core/lib/external_price_api/src/forced_price_client.rs b/core/lib/external_price_api/src/forced_price_client.rs
new file mode 100644
index 000000000000..f4b8d72b8b2c
--- /dev/null
+++ b/core/lib/external_price_api/src/forced_price_client.rs
@@ -0,0 +1,62 @@
+use std::num::NonZeroU64;
+
+use async_trait::async_trait;
+use rand::Rng;
+use zksync_config::configs::ExternalPriceApiClientConfig;
+use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address};
+
+use crate::PriceAPIClient;
+
+// Struct for a forced price "client" (conversion ratio is always a configured "forced" ratio).
+#[derive(Debug, Clone)]
+pub struct ForcedPriceClient {
+    ratio: BaseTokenAPIRatio,
+}
+
+impl ForcedPriceClient {
+    pub fn new(config: ExternalPriceApiClientConfig) -> Self {
+        let numerator = config
+            .forced_numerator
+            .expect("forced price client started with no forced numerator");
+        let denominator = config
+            .forced_denominator
+            .expect("forced price client started with no forced denominator");
+
+        Self {
+            ratio: BaseTokenAPIRatio {
+                numerator: NonZeroU64::new(numerator).unwrap(),
+                denominator: NonZeroU64::new(denominator).unwrap(),
+                ratio_timestamp: chrono::Utc::now(),
+            },
+        }
+    }
+}
+
+#[async_trait]
+impl PriceAPIClient for ForcedPriceClient {
+    // Returns a ratio which is up to 10% higher or lower than the configured forced ratio.
+    async fn fetch_ratio(&self, _token_address: Address) -> anyhow::Result<BaseTokenAPIRatio> {
+        let mut rng = rand::thread_rng();
+
+        let numerator_range = (
+            (self.ratio.numerator.get() as f64 * 0.9).round() as u64,
+            (self.ratio.numerator.get() as f64 * 1.1).round() as u64,
+        );
+
+        let denominator_range = (
+            (self.ratio.denominator.get() as f64 * 0.9).round() as u64,
+            (self.ratio.denominator.get() as f64 * 1.1).round() as u64,
+        );
+
+        let new_numerator = rng.gen_range(numerator_range.0..=numerator_range.1);
+        let new_denominator = rng.gen_range(denominator_range.0..=denominator_range.1);
+
+        let adjusted_ratio = BaseTokenAPIRatio {
+            numerator: NonZeroU64::new(new_numerator).unwrap_or(self.ratio.numerator),
+            denominator: NonZeroU64::new(new_denominator).unwrap_or(self.ratio.denominator),
+            ratio_timestamp: chrono::Utc::now(),
+        };
+
+        Ok(adjusted_ratio)
+    }
+}
diff --git a/core/lib/external_price_api/src/lib.rs b/core/lib/external_price_api/src/lib.rs
new file mode 100644
index 000000000000..e86279dbe850
--- /dev/null
+++ b/core/lib/external_price_api/src/lib.rs
@@ -0,0 +1,30 @@
+pub mod coingecko_api;
+pub mod forced_price_client;
+mod utils;
+
+use std::fmt;
+
+use async_trait::async_trait;
+use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address};
+
+/// Trait that defines the interface for a client connecting with an external API to get prices.
+#[async_trait]
+pub trait PriceAPIClient: Sync + Send + fmt::Debug + 'static {
+    /// Returns the BaseToken<->ETH ratio for the input token address.
+    async fn fetch_ratio(&self, token_address: Address) -> anyhow::Result<BaseTokenAPIRatio>;
+}
+
+// Struct for a no-op PriceAPIClient (conversion ratio is always 1:1).
+#[derive(Debug, Clone)]
+pub struct NoOpPriceAPIClient;
+
+#[async_trait]
+impl PriceAPIClient for NoOpPriceAPIClient {
+    async fn fetch_ratio(&self, _token_address: Address) -> anyhow::Result<BaseTokenAPIRatio> {
+        Ok(BaseTokenAPIRatio::default())
+    }
+}
+
+fn address_to_string(address: &Address) -> String {
+    format!("{:#x}", address)
+}
diff --git a/core/lib/external_price_api/src/utils.rs b/core/lib/external_price_api/src/utils.rs
new file mode 100644
index 000000000000..879be44e1737
--- /dev/null
+++ b/core/lib/external_price_api/src/utils.rs
@@ -0,0 +1,15 @@
+use std::num::NonZeroU64;
+
+use fraction::Fraction;
+
+/// Using the base token price and eth price, calculate the fraction of the base token to eth.
+pub fn get_fraction(ratio_f64: f64) -> (NonZeroU64, NonZeroU64) {
+    let rate_fraction = Fraction::from(ratio_f64);
+
+    let numerator = NonZeroU64::new(*rate_fraction.numer().expect("numerator is empty"))
+        .expect("numerator is zero");
+    let denominator = NonZeroU64::new(*rate_fraction.denom().expect("denominator is empty"))
+        .expect("denominator is zero");
+
+    (numerator, denominator)
+}
diff --git a/core/lib/health_check/Cargo.toml b/core/lib/health_check/Cargo.toml
index c2d4e85d209e..6f1d863d8cec 100644
--- a/core/lib/health_check/Cargo.toml
+++ b/core/lib/health_check/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_health_check"
-version = "0.1.0"
+description = "Health checks library"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
diff --git a/core/lib/health_check/src/lib.rs b/core/lib/health_check/src/lib.rs
index 8a3068d661d9..e4e8ba3c9a58 100644
--- a/core/lib/health_check/src/lib.rs
+++ b/core/lib/health_check/src/lib.rs
@@ -106,7 +106,12 @@ pub enum AppHealthCheckError {
 /// Application health check aggregating health from multiple components.
 #[derive(Debug)]
 pub struct AppHealthCheck {
-    components: Mutex<Vec<Arc<dyn CheckHealth>>>,
+    inner: Mutex<AppHealthCheckInner>,
+}
+
+#[derive(Debug, Clone)]
+struct AppHealthCheckInner {
+    components: Vec<Arc<dyn CheckHealth>>,
     slow_time_limit: Duration,
     hard_time_limit: Duration,
 }
@@ -118,17 +123,52 @@ impl Default for AppHealthCheck {
     }
 }
 
 impl AppHealthCheck {
-    pub fn new(slow_time_limit: Option<Duration>, hard_time_limit: Option<Duration>) -> Self {
-        const DEFAULT_SLOW_TIME_LIMIT: Duration = Duration::from_millis(500);
-        const DEFAULT_HARD_TIME_LIMIT: Duration = Duration::from_secs(3);
+    const DEFAULT_SLOW_TIME_LIMIT: Duration = Duration::from_millis(500);
+    const DEFAULT_HARD_TIME_LIMIT: Duration = Duration::from_secs(3);
 
-        let slow_time_limit = slow_time_limit.unwrap_or(DEFAULT_SLOW_TIME_LIMIT);
-        let hard_time_limit = hard_time_limit.unwrap_or(DEFAULT_HARD_TIME_LIMIT);
+    pub fn new(slow_time_limit: Option<Duration>, hard_time_limit: Option<Duration>) -> Self {
+        let slow_time_limit = slow_time_limit.unwrap_or(Self::DEFAULT_SLOW_TIME_LIMIT);
+        let hard_time_limit = hard_time_limit.unwrap_or(Self::DEFAULT_HARD_TIME_LIMIT);
         tracing::debug!("Created app health with time limits: slow={slow_time_limit:?}, hard={hard_time_limit:?}");
 
-        let config = AppHealthCheckConfig {
-            slow_time_limit: slow_time_limit.into(),
-            hard_time_limit: hard_time_limit.into(),
+        let inner = AppHealthCheckInner {
+            components: Vec::default(),
+            slow_time_limit,
+            hard_time_limit,
+        };
+        Self {
+            inner: Mutex::new(inner),
+        }
+    }
+
+    pub fn override_limits(
+        &self,
+        slow_time_limit: Option<Duration>,
+        hard_time_limit: Option<Duration>,
+    ) {
+        let mut guard = self.inner.lock().expect("`AppHealthCheck` is poisoned");
+        if let Some(slow_time_limit) = slow_time_limit {
+            guard.slow_time_limit = slow_time_limit;
+        }
+        if let Some(hard_time_limit) = hard_time_limit {
+            guard.hard_time_limit = hard_time_limit;
+        }
+        tracing::debug!(
+            "Overridden app health time limits: slow={:?}, hard={:?}",
+            guard.slow_time_limit,
+            guard.hard_time_limit
+        );
+    }
+
+    /// Sets the info metrics for the metrics time limits.
+    /// This method should be called at most once when all the health checks are collected.
+    pub fn expose_metrics(&self) {
+        let config = {
+            let inner = self.inner.lock().expect("`AppHealthCheck` is poisoned");
+            AppHealthCheckConfig {
+                slow_time_limit: inner.slow_time_limit.into(),
+                hard_time_limit: inner.hard_time_limit.into(),
+            }
         };
         if METRICS.info.set(config).is_err() {
             tracing::warn!(
@@ -136,12 +176,6 @@ impl AppHealthCheck {
                 METRICS.info.get()
             );
         }
-
-        Self {
-            components: Mutex::default(),
-            slow_time_limit,
-            hard_time_limit,
-        }
     }
 
     /// Inserts health check for a component.
@@ -166,32 +200,33 @@ impl AppHealthCheck {
         health_check: Arc<dyn CheckHealth>,
     ) -> Result<(), AppHealthCheckError> {
         let health_check_name = health_check.name();
-        let mut guard = self
+        let mut guard = self.inner.lock().expect("`AppHealthCheck` is poisoned");
+        if guard
             .components
-            .lock()
-            .expect("`AppHealthCheck` is poisoned");
-        if guard.iter().any(|check| check.name() == health_check_name) {
+            .iter()
+            .any(|check| check.name() == health_check_name)
+        {
            return Err(AppHealthCheckError::RedefinedComponent(health_check_name));
         }
-        guard.push(health_check);
+        guard.components.push(health_check);
         Ok(())
     }
 
     /// Checks the overall application health. This will query all component checks concurrently.
     pub async fn check_health(&self) -> AppHealth {
-        // Clone checks so that we don't hold a lock for them across a wait point.
+        // Clone `inner` so that we don't hold a lock on it across a wait point.
+ let AppHealthCheckInner { + components, + slow_time_limit, + hard_time_limit, + } = self + .inner .lock() .expect("`AppHealthCheck` is poisoned") .clone(); - let check_futures = health_checks.iter().map(|check| { - Self::check_health_with_time_limit( - check.as_ref(), - self.slow_time_limit, - self.hard_time_limit, - ) + let check_futures = components.iter().map(|check| { + Self::check_health_with_time_limit(check.as_ref(), slow_time_limit, hard_time_limit) }); let components: HashMap<_, _> = future::join_all(check_futures).await.into_iter().collect(); diff --git a/core/lib/health_check/src/tests.rs b/core/lib/health_check/src/tests.rs index 46c276372ae1..14c610e9fd83 100644 --- a/core/lib/health_check/src/tests.rs +++ b/core/lib/health_check/src/tests.rs @@ -81,9 +81,13 @@ async fn updating_health_status_return_value() { async fn aggregating_health_checks() { let (first_check, first_updater) = ReactiveHealthCheck::new("first"); let (second_check, second_updater) = ReactiveHealthCheck::new("second"); + let inner = AppHealthCheckInner { + components: vec![Arc::new(first_check), Arc::new(second_check)], + slow_time_limit: AppHealthCheck::DEFAULT_SLOW_TIME_LIMIT, + hard_time_limit: AppHealthCheck::DEFAULT_HARD_TIME_LIMIT, + }; let checks = AppHealthCheck { - components: Mutex::new(vec![Arc::new(first_check), Arc::new(second_check)]), - ..AppHealthCheck::default() + inner: Mutex::new(inner), }; let app_health = checks.check_health().await; diff --git a/core/lib/l1_contract_interface/Cargo.toml b/core/lib/l1_contract_interface/Cargo.toml index 56274c525f99..8b68df854e71 100644 --- a/core/lib/l1_contract_interface/Cargo.toml +++ b/core/lib/l1_contract_interface/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "zksync_l1_contract_interface" +description = "Interfaces for interacting with ZKsync contracts" version.workspace = true edition.workspace = true authors.workspace = true diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index cf17d8c7909e..b5d77ff60c16 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -17,6 +17,7 @@ use crate::{ /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata const PUBDATA_SOURCE_CALLDATA: u8 = 0; const PUBDATA_SOURCE_BLOBS: u8 = 1; +const PUBDATA_SOURCE_CUSTOM: u8 = 2; /// Encoding for `CommitBatchInfo` from `IExecutor.sol` for a contract running in rollup mode. #[derive(Debug)] @@ -208,6 +209,13 @@ impl Tokenizable for CommitBatchInfo<'_> { vec![PUBDATA_SOURCE_BLOBS] } + (L1BatchCommitmentMode::Rollup, PubdataDA::Custom) => { + panic!("Custom pubdata DA is incompatible with Rollup mode") + } + (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { + vec![PUBDATA_SOURCE_CUSTOM] + } + (L1BatchCommitmentMode::Rollup, PubdataDA::Calldata) => { // We compute and add the blob commitment to the pubdata payload so that we can verify the proof // even if we are not using blobs. 
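Editor's aside: tying the `AppHealthCheck` rework above together, here is a minimal usage sketch. The component name and limit values are illustrative; the calls (`new`, `insert_custom_component`, `override_limits`, `check_health`, and `ReactiveHealthCheck::new`) all appear in the diff hunks above, so only the wiring itself is invented:

```rust
use std::{sync::Arc, time::Duration};

use zksync_health_check::{AppHealthCheck, ReactiveHealthCheck};

async fn report_app_health() {
    // `None` limits fall back to the DEFAULT_SLOW_TIME_LIMIT / DEFAULT_HARD_TIME_LIMIT consts.
    let app_health = AppHealthCheck::new(None, None);
    // The updater half is what a component uses to push status changes; we keep it alive here.
    let (check, _updater) = ReactiveHealthCheck::new("connection_pool");
    app_health
        .insert_custom_component(Arc::new(check))
        .expect("component with this name was already registered");

    // Thanks to `override_limits`, the limits can now be tightened after construction:
    app_health.override_limits(Some(Duration::from_millis(200)), Some(Duration::from_secs(1)));

    let health = app_health.check_health().await;
    tracing::info!("app health: {health:?}");
}
```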
diff --git a/core/lib/mempool/Cargo.toml b/core/lib/mempool/Cargo.toml
index 25502cd1e83e..ca2203f174f6 100644
--- a/core/lib/mempool/Cargo.toml
+++ b/core/lib/mempool/Cargo.toml
@@ -1,5 +1,6 @@
 [package]
 name = "zksync_mempool"
+description = "ZKsync mempool implementation"
 version.workspace = true
 edition.workspace = true
 authors.workspace = true
diff --git a/core/lib/merkle_tree/Cargo.toml b/core/lib/merkle_tree/Cargo.toml
index 54c1e14e67b6..579350bccf4e 100644
--- a/core/lib/merkle_tree/Cargo.toml
+++ b/core/lib/merkle_tree/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_merkle_tree"
-version = "0.1.0"
+description = "ZKsync implementation of Jellyfish Merkle tree"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
@@ -12,7 +13,7 @@ categories.workspace = true
 [dependencies]
 vise.workspace = true
 zksync_types.workspace = true
-zksync_crypto.workspace = true
+zksync_crypto_primitives.workspace = true
 zksync_storage.workspace = true
 zksync_prover_interface.workspace = true
 zksync_utils.workspace = true
diff --git a/core/lib/merkle_tree/examples/loadtest/main.rs b/core/lib/merkle_tree/examples/loadtest/main.rs
index 2560124842b0..6ac8425c0fc6 100644
--- a/core/lib/merkle_tree/examples/loadtest/main.rs
+++ b/core/lib/merkle_tree/examples/loadtest/main.rs
@@ -13,7 +13,7 @@ use clap::Parser;
 use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng};
 use tempfile::TempDir;
 use tracing_subscriber::EnvFilter;
-use zksync_crypto::hasher::blake2::Blake2Hasher;
+use zksync_crypto_primitives::hasher::blake2::Blake2Hasher;
 use zksync_merkle_tree::{
     Database, HashTree, MerkleTree, MerkleTreePruner, PatchSet, RocksDBWrapper, TreeEntry,
     TreeInstruction,
diff --git a/core/lib/merkle_tree/examples/recovery.rs b/core/lib/merkle_tree/examples/recovery.rs
index 882bfe9d9823..113471ff9e0d 100644
--- a/core/lib/merkle_tree/examples/recovery.rs
+++ b/core/lib/merkle_tree/examples/recovery.rs
@@ -7,7 +7,7 @@ use clap::Parser;
 use rand::{rngs::StdRng, Rng, SeedableRng};
 use tempfile::TempDir;
 use tracing_subscriber::EnvFilter;
-use zksync_crypto::hasher::blake2::Blake2Hasher;
+use zksync_crypto_primitives::hasher::blake2::Blake2Hasher;
 use zksync_merkle_tree::{
     recovery::MerkleTreeRecovery, HashTree, Key, MerkleTree, PatchSet, PruneDatabase,
     RocksDBWrapper, TreeEntry, ValueHash,
diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs
index ffc4b0b84106..a4d577fc3ba5 100644
--- a/core/lib/merkle_tree/src/domain.rs
+++ b/core/lib/merkle_tree/src/domain.rs
@@ -1,8 +1,8 @@
 //! Tying the Merkle tree implementation to the problem domain.
 
 use rayon::{ThreadPool, ThreadPoolBuilder};
-use zksync_crypto::hasher::blake2::Blake2Hasher;
-use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata};
+use zksync_crypto_primitives::hasher::blake2::Blake2Hasher;
+use zksync_prover_interface::inputs::{StorageLogMetadata, WitnessInputMerklePaths};
 use zksync_types::{L1BatchNumber, StorageKey};
 
 use crate::{
@@ -15,6 +15,20 @@ use crate::{
     BlockOutput, HashTree, MerkleTree, MerkleTreePruner, MerkleTreePrunerHandle, NoVersionError,
 };
 
+impl TreeInstruction<StorageKey> {
+    /// Maps the key preimage in this instruction to a hashed key used by the Merkle tree.
+ pub fn with_hashed_key(self) -> TreeInstruction { + match self { + Self::Read(key) => TreeInstruction::Read(key.hashed_key_u256()), + Self::Write(entry) => TreeInstruction::Write(TreeEntry { + key: entry.key.hashed_key_u256(), + value: entry.value, + leaf_index: entry.leaf_index, + }), + } + } +} + /// Metadata for the current tree state. #[derive(Debug, Clone)] pub struct TreeMetadata { @@ -23,7 +37,7 @@ pub struct TreeMetadata { /// 1-based index of the next leaf to be inserted in the tree. pub rollup_last_leaf_index: u64, /// Witness information. As with `repeated_writes`, no-op updates will be omitted from Merkle paths. - pub witness: Option<PrepareBasicCircuitsJob>, + pub witness: Option<WitnessInputMerklePaths>, } #[derive(Debug, PartialEq, Eq)] @@ -63,18 +77,13 @@ impl ZkSyncTree { /// Returns metadata based on `storage_logs` generated by the genesis L1 batch. This does not /// create a persistent tree. #[allow(clippy::missing_panics_doc)] // false positive - pub fn process_genesis_batch(storage_logs: &[TreeInstruction<StorageKey>]) -> BlockOutput { + pub fn process_genesis_batch(storage_logs: &[TreeInstruction]) -> BlockOutput { let kvs = Self::filter_write_instructions(storage_logs); tracing::info!( - "Creating Merkle tree for genesis batch with {instr_count} writes", + "Creating Merkle tree for genesis batch with {instr_count} writes", instr_count = kvs.len() ); - let kvs: Vec<_> = kvs - .iter() - .map(|instr| instr.map_key(StorageKey::hashed_key_u256)) - .collect(); - // `unwrap()`s are safe: in-memory trees never raise I/O errors let mut in_memory_tree = MerkleTree::new(PatchSet::default()).unwrap(); let output = in_memory_tree.extend(kvs).unwrap(); @@ -212,7 +221,7 @@ impl ZkSyncTree { /// Proxies database I/O errors. pub fn process_l1_batch( &mut self, - storage_logs: &[TreeInstruction<StorageKey>], + storage_logs: &[TreeInstruction], ) -> anyhow::Result<TreeMetadata> { match self.mode { TreeMode::Full => self.process_l1_batch_full(storage_logs), @@ -222,29 +231,24 @@ impl ZkSyncTree { fn process_l1_batch_full( &mut self, - instructions: &[TreeInstruction<StorageKey>], + instructions: &[TreeInstruction], ) -> anyhow::Result<TreeMetadata> { let l1_batch_number = self.next_l1_batch_number(); let starting_leaf_count = self.tree.latest_root().leaf_count(); let starting_root_hash = self.tree.latest_root_hash(); - let instructions_with_hashed_keys: Vec<_> = instructions - .iter() - .map(|instr| instr.map_key(StorageKey::hashed_key_u256)) - .collect(); - tracing::info!( "Extending Merkle tree with batch #{l1_batch_number} with {instr_count} ops in full mode", instr_count = instructions.len() ); let output = if let Some(thread_pool) = &self.thread_pool { - thread_pool.install(|| self.tree.extend_with_proofs(instructions_with_hashed_keys)) + thread_pool.install(|| self.tree.extend_with_proofs(instructions.to_vec())) } else { - self.tree.extend_with_proofs(instructions_with_hashed_keys) + self.tree.extend_with_proofs(instructions.to_vec()) }?; - let mut witness = PrepareBasicCircuitsJob::new(starting_leaf_count + 1); + let mut witness = WitnessInputMerklePaths::new(starting_leaf_count + 1); witness.reserve(output.logs.len()); for (log, instruction) in output.logs.iter().zip(instructions) { let empty_levels_end = TREE_DEPTH - log.merkle_path.len(); @@ -265,7 +269,7 @@ impl ZkSyncTree { is_write: !log.base.is_read(), first_write: matches!(log.base, TreeLogEntry::Inserted), merkle_paths, - leaf_hashed_key: instruction.key().hashed_key_u256(), + leaf_hashed_key: instruction.key(), leaf_enumeration_index: match instruction { TreeInstruction::Write(entry) => entry.leaf_index, TreeInstruction::Read(_)
=> match log.base { @@ -307,7 +311,7 @@ impl ZkSyncTree { fn process_l1_batch_lightweight( &mut self, - instructions: &[TreeInstruction<StorageKey>], + instructions: &[TreeInstruction], ) -> anyhow::Result<TreeMetadata> { let kvs = Self::filter_write_instructions(instructions); let l1_batch_number = self.next_l1_batch_number(); @@ -317,15 +321,10 @@ impl ZkSyncTree { kv_count = kvs.len() ); - let kvs_with_derived_key: Vec<_> = kvs - .iter() - .map(|entry| entry.map_key(StorageKey::hashed_key_u256)) - .collect(); - let output = if let Some(thread_pool) = &self.thread_pool { - thread_pool.install(|| self.tree.extend(kvs_with_derived_key.clone())) + thread_pool.install(|| self.tree.extend(kvs)) } else { - self.tree.extend(kvs_with_derived_key.clone()) + self.tree.extend(kvs) }?; tracing::info!( @@ -342,9 +341,7 @@ impl ZkSyncTree { }) } - fn filter_write_instructions( - instructions: &[TreeInstruction<StorageKey>], - ) -> Vec<TreeEntry<StorageKey>> { + fn filter_write_instructions(instructions: &[TreeInstruction]) -> Vec<TreeEntry> { let kvs = instructions .iter() .filter_map(|instruction| match instruction { diff --git a/core/lib/merkle_tree/src/hasher/mod.rs b/core/lib/merkle_tree/src/hasher/mod.rs index fa700a68244f..3e4444b3bef2 100644 --- a/core/lib/merkle_tree/src/hasher/mod.rs +++ b/core/lib/merkle_tree/src/hasher/mod.rs @@ -3,7 +3,7 @@ use std::{fmt, iter}; use once_cell::sync::Lazy; -use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; +use zksync_crypto_primitives::hasher::{blake2::Blake2Hasher, Hasher}; pub(crate) use self::nodes::{InternalNodeCache, MerklePath}; pub use self::proofs::TreeRangeDigest; diff --git a/core/lib/merkle_tree/src/hasher/nodes.rs b/core/lib/merkle_tree/src/hasher/nodes.rs index 6172d9088126..c652b44c6fc6 100644 --- a/core/lib/merkle_tree/src/hasher/nodes.rs +++ b/core/lib/merkle_tree/src/hasher/nodes.rs @@ -268,7 +268,7 @@ impl Root { #[cfg(test)] mod tests { - use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; + use zksync_crypto_primitives::hasher::{blake2::Blake2Hasher, Hasher}; use zksync_types::H256; use super::*; diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 0e6dd7793260..6f9da59cf0ed 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -46,7 +46,7 @@ clippy::doc_markdown // frequent false positive: RocksDB )] -use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; pub use crate::{ errors::NoVersionError, diff --git a/core/lib/merkle_tree/src/metrics.rs b/core/lib/merkle_tree/src/metrics.rs index 84769482527a..99757a2580cb 100644 --- a/core/lib/merkle_tree/src/metrics.rs +++ b/core/lib/merkle_tree/src/metrics.rs @@ -309,6 +309,21 @@ enum Bound { End, } +const LARGE_NODE_COUNT_BUCKETS: Buckets = Buckets::values(&[ + 1_000.0, + 2_000.0, + 5_000.0, + 10_000.0, + 20_000.0, + 50_000.0, + 100_000.0, + 200_000.0, + 500_000.0, + 1_000_000.0, + 2_000_000.0, + 5_000_000.0, +]); + #[derive(Debug, Metrics)] #[metrics(prefix = "merkle_tree_pruning")] struct PruningMetrics { @@ -316,7 +331,7 @@ struct PruningMetrics { /// may not remove all stale keys to this version if there are too many. target_retained_version: Gauge<u64>, /// Number of pruned node keys on a specific pruning iteration. - #[metrics(buckets = NODE_COUNT_BUCKETS)] + #[metrics(buckets = LARGE_NODE_COUNT_BUCKETS)] key_count: Histogram<usize>, /// Lower and upper boundaries on the new stale key versions deleted /// during a pruning iteration. The lower boundary is inclusive, the upper one is exclusive.
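The shared `LARGE_NODE_COUNT_BUCKETS` constant introduced above follows a 1-2-5 progression per decade, from 1e3 to 5e6. A sketch of how a recorded sample maps onto such bounds (illustration only; the `vise` histogram does this internally, and `bucket_index` is a hypothetical helper, not a real API):

const LARGE_NODE_COUNT_BUCKETS: [f64; 12] = [
    1_000.0, 2_000.0, 5_000.0, 10_000.0, 20_000.0, 50_000.0,
    100_000.0, 200_000.0, 500_000.0, 1_000_000.0, 2_000_000.0, 5_000_000.0,
];

/// Index of the first bucket whose upper bound fits `sample`, or `None` if the
/// sample overflows into the implicit `+Inf` bucket.
fn bucket_index(sample: f64) -> Option<usize> {
    LARGE_NODE_COUNT_BUCKETS.iter().position(|&bound| sample <= bound)
}

For example, a pruning iteration that removes 42_000 keys is recorded under the 50_000 bound (index 5), and anything above 5_000_000 lands in the implicit overflow bucket.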
@@ -368,26 +383,11 @@ pub(crate) enum RecoveryStage { ParallelPersistence, } -const CHUNK_SIZE_BUCKETS: Buckets = Buckets::values(&[ - 1_000.0, - 2_000.0, - 5_000.0, - 10_000.0, - 20_000.0, - 50_000.0, - 100_000.0, - 200_000.0, - 500_000.0, - 1_000_000.0, - 2_000_000.0, - 5_000_000.0, -]); - #[derive(Debug, Metrics)] #[metrics(prefix = "merkle_tree_recovery")] pub(crate) struct RecoveryMetrics { /// Number of entries in a recovered chunk. - #[metrics(buckets = CHUNK_SIZE_BUCKETS)] + #[metrics(buckets = LARGE_NODE_COUNT_BUCKETS)] pub chunk_size: Histogram<usize>, /// Latency of a specific stage of recovery for a single chunk. #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] diff --git a/core/lib/merkle_tree/src/pruning.rs b/core/lib/merkle_tree/src/pruning.rs index 1734fdcbf0a8..a74db40ef5e6 100644 --- a/core/lib/merkle_tree/src/pruning.rs +++ b/core/lib/merkle_tree/src/pruning.rs @@ -166,7 +166,7 @@ impl<DB: PruneDatabase> MerkleTreePruner<DB> { break; } } - load_stale_keys_latency.observe(); + let load_stale_keys_latency = load_stale_keys_latency.observe(); if pruned_keys.is_empty() { tracing::debug!("No stale keys to remove; skipping"); } let deleted_stale_key_versions = min_stale_key_version..(max_stale_key_version + 1); tracing::info!( - "Collected {} stale keys with new versions in {deleted_stale_key_versions:?}", + "Collected {} stale keys with new versions in {deleted_stale_key_versions:?} in {load_stale_keys_latency:?}", pruned_keys.len() ); @@ -186,7 +186,8 @@ impl<DB: PruneDatabase> MerkleTreePruner<DB> { let patch = PrunePatchSet::new(pruned_keys, deleted_stale_key_versions); let apply_patch_latency = PRUNING_TIMINGS.apply_patch.start(); self.db.prune(patch)?; - apply_patch_latency.observe(); + let apply_patch_latency = apply_patch_latency.observe(); + tracing::info!("Pruned stale keys in {apply_patch_latency:?}: {stats:?}"); Ok(Some(stats)) } @@ -230,6 +231,7 @@ impl<DB: PruneDatabase> MerkleTreePruner<DB> { self.poll_interval }; } + tracing::info!("Stop signal received, tree pruning is shut down"); Ok(()) } } diff --git a/core/lib/merkle_tree/src/recovery/mod.rs b/core/lib/merkle_tree/src/recovery/mod.rs index 87a601f32f97..c208c12795a2 100644 --- a/core/lib/merkle_tree/src/recovery/mod.rs +++ b/core/lib/merkle_tree/src/recovery/mod.rs @@ -38,7 +38,7 @@ use std::{collections::HashMap, time::Instant}; use anyhow::Context as _; -use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; pub use crate::storage::PersistenceThreadHandle; use crate::{ diff --git a/core/lib/merkle_tree/src/storage/tests.rs b/core/lib/merkle_tree/src/storage/tests.rs index 8656c471905e..accf2d2de10e 100644 --- a/core/lib/merkle_tree/src/storage/tests.rs +++ b/core/lib/merkle_tree/src/storage/tests.rs @@ -7,7 +7,7 @@ use rand::{ Rng, SeedableRng, }; use test_casing::test_casing; -use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_types::{H256, U256}; use super::*; diff --git a/core/lib/merkle_tree/src/types/mod.rs b/core/lib/merkle_tree/src/types/mod.rs index bd59099a3a65..807ae0238769 100644 --- a/core/lib/merkle_tree/src/types/mod.rs +++ b/core/lib/merkle_tree/src/types/mod.rs @@ -38,13 +38,6 @@ impl TreeInstruction { Self::Write(entry) => entry.key, } } - - pub(crate) fn map_key<U>(&self, map_fn: impl FnOnce(&K) -> U) -> TreeInstruction<U> { - match self { - Self::Read(key) => TreeInstruction::Read(map_fn(key)), - Self::Write(entry) => TreeInstruction::Write(entry.map_key(map_fn)), - } - } } /// Entry in
a Merkle tree associated with a key. @@ -77,10 +70,6 @@ impl<K> TreeEntry<K> { leaf_index, } } - - pub(crate) fn map_key<U>(&self, map_fn: impl FnOnce(&K) -> U) -> TreeEntry<U> { - TreeEntry::new(map_fn(&self.key), self.leaf_index, self.value) - } } impl TreeEntry { diff --git a/core/lib/merkle_tree/tests/integration/common.rs b/core/lib/merkle_tree/tests/integration/common.rs index 28c3827827a9..453fd1f05bda 100644 --- a/core/lib/merkle_tree/tests/integration/common.rs +++ b/core/lib/merkle_tree/tests/integration/common.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use once_cell::sync::Lazy; -use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; +use zksync_crypto_primitives::hasher::{blake2::Blake2Hasher, Hasher}; use zksync_merkle_tree::{HashTree, TreeEntry, TreeInstruction}; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; diff --git a/core/lib/merkle_tree/tests/integration/domain.rs b/core/lib/merkle_tree/tests/integration/domain.rs index db5accf30a6b..abd3dbbcd3f3 100644 --- a/core/lib/merkle_tree/tests/integration/domain.rs +++ b/core/lib/merkle_tree/tests/integration/domain.rs @@ -5,14 +5,14 @@ use std::slice; use serde::{Deserialize, Serialize}; use serde_with::{hex::Hex, serde_as}; use tempfile::TempDir; -use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{domain::ZkSyncTree, HashTree, TreeEntry, TreeInstruction}; use zksync_prover_interface::inputs::StorageLogMetadata; use zksync_storage::RocksDB; use zksync_system_constants::ACCOUNT_CODE_STORAGE_ADDRESS; use zksync_types::{AccountTreeId, Address, L1BatchNumber, StorageKey, H256}; -fn gen_storage_logs() -> Vec<TreeInstruction<StorageKey>> { +fn gen_storage_logs() -> Vec<TreeInstruction> { let addrs = vec![ "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2", "ef4bb7b21c5fe7432a7d63876cc59ecc23b46636", @@ -32,7 +32,7 @@ fn gen_storage_logs() -> Vec<TreeInstruction<StorageKey>> { .zip(proof_values) .enumerate() .map(|(i, (proof_key, proof_value))| { - let entry = TreeEntry::new(proof_key, i as u64 + 1, proof_value); + let entry = TreeEntry::new(proof_key.hashed_key_u256(), i as u64 + 1, proof_value); TreeInstruction::Write(entry) }) .collect() @@ -171,11 +171,12 @@ fn revert_blocks() { // Produce 4 blocks with distinct values and 1 block with modified values from first block let block_size: usize = 25; let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); - let proof_keys = (0..100) - .map(move |i| StorageKey::new(AccountTreeId::new(address), H256::from_low_u64_be(i))); + let proof_keys = (0..100).map(move |i| { + StorageKey::new(AccountTreeId::new(address), H256::from_low_u64_be(i)).hashed_key_u256() + }); let proof_values = (0..100).map(H256::from_low_u64_be); - // Add couple of blocks of distinct keys/values + // Add a couple of blocks of distinct keys/values let mut logs: Vec<_> = proof_keys .zip(proof_values) .map(|(proof_key, proof_value)| { @@ -185,7 +186,8 @@ fn revert_blocks() { .collect(); // Add a block with repeated keys let extra_logs = (0..block_size).map(move |i| { - let key = StorageKey::new(AccountTreeId::new(address), H256::from_low_u64_be(i as u64)); + let key = StorageKey::new(AccountTreeId::new(address), H256::from_low_u64_be(i as u64)) + .hashed_key_u256(); let entry = TreeEntry::new(key, i as u64 + 1, H256::from_low_u64_be(i as u64 + 1)); TreeInstruction::Write(entry) }); @@ -317,9 +319,13 @@ fn create_write_log( address: Address, address_storage_key: [u8; 32], value: [u8; 32], -) -> TreeInstruction<StorageKey> { +) -> TreeInstruction { let key =
StorageKey::new(AccountTreeId::new(address), H256(address_storage_key)); - TreeInstruction::Write(TreeEntry::new(key, leaf_index, H256(value))) + TreeInstruction::Write(TreeEntry::new( + key.hashed_key_u256(), + leaf_index, + H256(value), + )) } fn subtract_from_max_value(diff: u8) -> [u8; 32] { diff --git a/core/lib/merkle_tree/tests/integration/merkle_tree.rs b/core/lib/merkle_tree/tests/integration/merkle_tree.rs index a83b982cc497..fc26cafe9ba7 100644 --- a/core/lib/merkle_tree/tests/integration/merkle_tree.rs +++ b/core/lib/merkle_tree/tests/integration/merkle_tree.rs @@ -4,7 +4,7 @@ use std::{cmp, mem}; use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; use test_casing::test_casing; -use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ Database, HashTree, MerkleTree, PatchSet, Patched, TreeEntry, TreeInstruction, TreeLogEntry, TreeRangeDigest, diff --git a/core/lib/merkle_tree/tests/integration/recovery.rs b/core/lib/merkle_tree/tests/integration/recovery.rs index 0bed36185d7c..f7ee2d154394 100644 --- a/core/lib/merkle_tree/tests/integration/recovery.rs +++ b/core/lib/merkle_tree/tests/integration/recovery.rs @@ -2,7 +2,7 @@ use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng}; use test_casing::test_casing; -use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ recovery::MerkleTreeRecovery, Database, MerkleTree, PatchSet, PruneDatabase, ValueHash, }; diff --git a/core/lib/mini_merkle_tree/Cargo.toml b/core/lib/mini_merkle_tree/Cargo.toml index d4cccbda6d3e..1a8744318039 100644 --- a/core/lib/mini_merkle_tree/Cargo.toml +++ b/core/lib/mini_merkle_tree/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_mini_merkle_tree" -version = "0.1.0" +description = "ZKsync implementation of small Merkle trees" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -10,7 +11,7 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_crypto.workspace = true +zksync_crypto_primitives.workspace = true zksync_basic_types.workspace = true once_cell.workspace = true diff --git a/core/lib/mini_merkle_tree/src/lib.rs b/core/lib/mini_merkle_tree/src/lib.rs index 3d4ff3cf561c..d34f57999961 100644 --- a/core/lib/mini_merkle_tree/src/lib.rs +++ b/core/lib/mini_merkle_tree/src/lib.rs @@ -13,7 +13,7 @@ use once_cell::sync::OnceCell; mod tests; use zksync_basic_types::H256; -use zksync_crypto::hasher::{keccak::KeccakHasher, Hasher}; +use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher}; /// Maximum supported depth of the tree. 32 corresponds to `2^32` elements in the tree, which /// we are unlikely to ever hit. diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index ee0db774ec64..5a21d35b1f60 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -1,6 +1,7 @@ [package] -name = "multivm" -version = "0.1.0" +name = "zksync_multivm" +description = "ZKsync out-of-circuit VM" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/lib/multivm/src/interface/traits/vm.rs b/core/lib/multivm/src/interface/traits/vm.rs index 4686e4ca17be..fdf2ccd8f3f9 100644 --- a/core/lib/multivm/src/interface/traits/vm.rs +++ b/core/lib/multivm/src/interface/traits/vm.rs @@ -19,7 +19,7 @@ //! sync::Arc //! }; //! use once_cell::sync::OnceCell; -//! use multivm::{ +//!
use zksync_multivm::{ //! interface::{L1BatchEnv, SystemEnv, VmInterface}, //! tracers::CallTracer , //! vm_latest::ToTracerPointer diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs index bb74f9628946..103c5d16540e 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs @@ -13,7 +13,7 @@ use crate::{ const EMPTY_TXS_ROLLING_HASH: H256 = H256::zero(); -#[derive(Debug, Clone)] +#[derive(Debug)] pub(crate) struct BootloaderL2Block { pub(crate) number: u32, pub(crate) timestamp: u64, @@ -56,12 +56,6 @@ impl BootloaderL2Block { self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash) } - pub(crate) fn interim_version(&self) -> BootloaderL2Block { - let mut interim = self.clone(); - interim.max_virtual_blocks_to_create = 0; - interim - } - pub(crate) fn make_snapshot(&self) -> L2BlockSnapshot { L2BlockSnapshot { txs_rolling_hash: self.txs_rolling_hash, diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs index c43d82b0d281..a3f59937d57e 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs @@ -29,7 +29,7 @@ use crate::{ /// Serves two purposes: /// - Tracks where next tx should be pushed to in the bootloader memory. /// - Tracks which transaction should be executed next. -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct BootloaderState { /// ID of the next transaction to be executed. /// See the structure doc-comment for a better explanation of purpose. 
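The removed `interim_version()` above cloned the entire `BootloaderL2Block` only to zero out `max_virtual_blocks_to_create`; the utils.rs hunk below instead threads a `start_new_l2_block` flag down to the single memory slot that differs. A condensed sketch of the resulting rule (illustration only; it assumes `max_virtual_blocks_to_create: u32`, a field not shown in the excerpt above):

// When a transaction continues the current L2 block rather than opening a new
// one, the "max virtual blocks to create" slot is written as zero.
fn virtual_blocks_slot_value(max_virtual_blocks_to_create: u32, start_new_l2_block: bool) -> u64 {
    if start_new_l2_block {
        u64::from(max_virtual_blocks_to_create)
    } else {
        0
    }
}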
diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs index 346c1bde5368..db4c834fbc77 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs @@ -67,12 +67,7 @@ pub(super) fn apply_tx_to_memory( .zip(bootloader_tx.encoded.clone()), ); - let bootloader_l2_block = if start_new_l2_block { - bootloader_l2_block.clone() - } else { - bootloader_l2_block.interim_version() - }; - apply_l2_block(memory, &bootloader_l2_block, tx_index); + apply_l2_block_inner(memory, bootloader_l2_block, tx_index, start_new_l2_block); // Note, +1 is moving for pointer let compressed_bytecodes_offset = COMPRESSED_BYTECODES_OFFSET + 1 + compressed_bytecodes_size; @@ -93,6 +88,15 @@ pub(crate) fn apply_l2_block( memory: &mut BootloaderMemory, bootloader_l2_block: &BootloaderL2Block, txs_index: usize, +) { + apply_l2_block_inner(memory, bootloader_l2_block, txs_index, true) +} + +fn apply_l2_block_inner( + memory: &mut BootloaderMemory, + bootloader_l2_block: &BootloaderL2Block, + txs_index: usize, + start_new_l2_block: bool, ) { // Since L2 block information start from the `TX_OPERATOR_L2_BLOCK_INFO_OFFSET` and each // L2 block info takes `TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO` slots, the position where the L2 block info @@ -110,7 +114,11 @@ pub(crate) fn apply_l2_block( ), ( block_position + 3, - bootloader_l2_block.max_virtual_blocks_to_create.into(), + if start_new_l2_block { + bootloader_l2_block.max_virtual_blocks_to_create.into() + } else { + U256::zero() + }, ), ]) } diff --git a/core/lib/node_framework_derive/Cargo.toml b/core/lib/node_framework_derive/Cargo.toml new file mode 100644 index 000000000000..0d3c69a3e591 --- /dev/null +++ b/core/lib/node_framework_derive/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "zksync_node_framework_derive" +description = "Derive macro for ZKsync node framework" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[lib] +proc-macro = true + +[dependencies] +syn = { workspace = true, features = ["full"] } +quote.workspace = true +proc-macro2.workspace = true diff --git a/core/lib/node_framework_derive/src/helpers.rs b/core/lib/node_framework_derive/src/helpers.rs new file mode 100644 index 000000000000..005e959b2bee --- /dev/null +++ b/core/lib/node_framework_derive/src/helpers.rs @@ -0,0 +1,44 @@ +use std::fmt; + +use syn::{GenericArgument, PathArguments, Type}; + +use crate::labels::CtxLabel; + +/// Representation of a single structure field. +pub(crate) struct Field { + /// Name of the field. + pub(crate) ident: syn::Ident, + /// Type of the field. + pub(crate) ty: syn::Type, + /// Parsed label. 
+ pub(crate) label: CtxLabel, +} + +impl fmt::Debug for Field { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Field") + .field("ident", &self.ident) + .field("label", &self.label) + .finish() + } +} + +// Helper function to check if a field is of type Option<T> and extract T +pub(crate) fn extract_option_inner_type(ty: &Type) -> Option<&Type> { + if let Type::Path(type_path) = ty { + // Check if the path is `Option` + if type_path.path.segments.len() == 1 { + let segment = &type_path.path.segments[0]; + if segment.ident == "Option" { + if let PathArguments::AngleBracketed(angle_bracketed_args) = &segment.arguments { + if angle_bracketed_args.args.len() == 1 { + if let GenericArgument::Type(inner_type) = &angle_bracketed_args.args[0] { + return Some(inner_type); + } + } + } + } + } + } + None +} diff --git a/core/lib/node_framework_derive/src/labels.rs b/core/lib/node_framework_derive/src/labels.rs new file mode 100644 index 000000000000..2bac5a7f7552 --- /dev/null +++ b/core/lib/node_framework_derive/src/labels.rs @@ -0,0 +1,98 @@ +use std::fmt; + +use syn::{spanned::Spanned as _, Attribute, Result}; + +/// Context label, e.g. `ctx(crate = "crate")`. +#[derive(Default)] +pub(crate) struct CtxLabel { + /// Special attribute that marks the derive as internal. + /// Alters the path to the trait to be implemented. + pub(crate) krate: Option<syn::Path>, // `crate` is a reserved keyword and cannot be a raw identifier. + pub(crate) span: Option<proc_macro2::Span>, + pub(crate) task: bool, + pub(crate) default: bool, +} + +impl fmt::Debug for CtxLabel { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // For some weird reason, doc tests fail with the derived impl, stating that + // `syn::Path` does not implement `Debug`. + f.debug_struct("CtxLabel") + .field("krate", &self.krate.as_ref().and_then(|p| p.get_ident())) + .field("span", &self.span) + .field("task", &self.task) + .field("default", &self.default) + .finish() + } +} + +impl CtxLabel { + const ATTR_NAME: &'static str = "context"; + const CRATE_LABEL: &'static str = "crate"; + const TASK_LABEL: &'static str = "task"; + const DEFAULT_LABEL: &'static str = "default"; + const LABELS: &'static [&'static str] = + &[Self::CRATE_LABEL, Self::TASK_LABEL, Self::DEFAULT_LABEL]; + + pub(crate) fn parse(attrs: &[Attribute]) -> Result<Option<Self>> { + let mut self_ = Self::default(); + + let mut found = false; + for attr in attrs { + if attr.path().is_ident(Self::ATTR_NAME) { + found = true; + self_.span = Some(attr.span()); + match attr.meta { + syn::Meta::Path(_) => { + // No values to parse. + break; + } + syn::Meta::NameValue(_) => { + return Err(syn::Error::new_spanned( + attr, + "Unexpected value, expected a list of labels", + )); + } + syn::Meta::List(_) => { + // Do nothing, parsing happens below.
+ } + } + attr.parse_nested_meta(|meta| { + let mut added = false; + for &label in Self::LABELS { + if meta.path.is_ident(label) { + match label { + Self::CRATE_LABEL => { + let value = meta.value()?; + let path: syn::Path = value.parse()?; + self_.krate = Some(path); + } + Self::TASK_LABEL => { + self_.task = true; + } + Self::DEFAULT_LABEL => { + self_.default = true; + } + _ => unreachable!(), + } + added = true; + break; + } + } + + if !added { + let err_msg = + format!("Unexpected token, supported labels: `{:?}`", Self::LABELS); + let err = syn::Error::new_spanned(attr, err_msg); + return Err(err); + } + Ok(()) + })?; + } + } + if !found { + return Ok(None); + } + Ok(Some(self_)) + } +} diff --git a/core/lib/node_framework_derive/src/lib.rs b/core/lib/node_framework_derive/src/lib.rs new file mode 100644 index 000000000000..867e0c75fa54 --- /dev/null +++ b/core/lib/node_framework_derive/src/lib.rs @@ -0,0 +1,39 @@ +extern crate proc_macro; + +use proc_macro::TokenStream; +use syn::{parse_macro_input, DeriveInput}; + +use crate::macro_impl::{MacroImpl, MacroKind}; + +pub(crate) mod helpers; +mod labels; +mod macro_impl; + +/// Derive macro for the `FromContext` trait. +/// Allows automatically fetching all the resources and tasks from the context. +/// +/// See the trait documentation for more details. +#[proc_macro_derive(FromContext, attributes(context))] +pub fn from_context_derive(input: TokenStream) -> TokenStream { + // Parse the input tokens into a syntax tree + let input = parse_macro_input!(input as DeriveInput); + MacroImpl::parse(MacroKind::FromContext, input) + .and_then(|from_context| from_context.render()) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + +/// Derive macro for the `IntoContext` trait. +/// Allows automatically inserting all the resources and tasks created by the wiring layer +/// into the context. +/// +/// See the trait documentation for more details. +#[proc_macro_derive(IntoContext, attributes(context))] +pub fn into_context_derive(input: TokenStream) -> TokenStream { + // Parse the input tokens into a syntax tree + let input = parse_macro_input!(input as DeriveInput); + MacroImpl::parse(MacroKind::IntoContext, input) + .and_then(|from_context| from_context.render()) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} diff --git a/core/lib/node_framework_derive/src/macro_impl.rs b/core/lib/node_framework_derive/src/macro_impl.rs new file mode 100644 index 000000000000..30532184fd2c --- /dev/null +++ b/core/lib/node_framework_derive/src/macro_impl.rs @@ -0,0 +1,190 @@ +use std::fmt; + +use quote::quote; +use syn::{DeriveInput, Result}; + +use crate::{helpers::Field, labels::CtxLabel}; + +#[derive(Debug)] +pub enum MacroKind { + FromContext, + IntoContext, +} + +impl fmt::Display for MacroKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::FromContext => write!(f, "FromContext"), + Self::IntoContext => write!(f, "IntoContext"), + } + } +} + +/// Parser and renderer for the `FromContext` and `IntoContext` derive macros.
+#[derive(Debug)] +pub struct MacroImpl { + macro_kind: MacroKind, + ctx: CtxLabel, + ident: syn::Ident, + fields: Vec<Field>, +} + +impl MacroImpl { + pub(crate) fn parse(macro_kind: MacroKind, input: DeriveInput) -> Result<Self> { + let ctx = CtxLabel::parse(&input.attrs)?.unwrap_or_default(); + let ident = input.ident; + if !input.generics.params.is_empty() { + return Err(syn::Error::new( + ident.span(), + format!("Generics are not supported for `{macro_kind}`"), + )); + } + let fields = match input.data { + syn::Data::Struct(data) => match data.fields { + syn::Fields::Named(fields) => fields + .named + .into_iter() + .map(|field| { + let ident = field.ident.unwrap(); + let ty = field.ty; + let label = CtxLabel::parse(&field.attrs)?.unwrap_or_default(); + Ok(Field { ident, ty, label }) + }) + .collect::<Result<Vec<_>>>()?, + _ => { + return Err(syn::Error::new( + ident.span(), + format!("Only named fields are supported for `{macro_kind}`"), + )) + } + }, + _ => { + return Err(syn::Error::new( + ident.span(), + format!("Only structures are supported for `{macro_kind}`"), + )) + } + }; + + Ok(Self { + macro_kind, + ctx, + ident, + fields, + }) + } + + pub fn render(self) -> Result<proc_macro2::TokenStream> { + match self.macro_kind { + MacroKind::FromContext => self.render_from_context(), + MacroKind::IntoContext => self.render_into_context(), + } + } + + fn crate_path(&self) -> proc_macro2::TokenStream { + if let Some(krate) = &self.ctx.krate { + quote! { #krate } + } else { + quote! { zksync_node_framework } + } + } + + fn render_from_context(self) -> Result<proc_macro2::TokenStream> { + let crate_path = self.crate_path(); + let ident = self.ident; + let mut fields = Vec::new(); + for field in self.fields { + let ty = field.ty; + let ident = field.ident; + let default = field.label.default; + + if field.label.krate.is_some() { + return Err(syn::Error::new_spanned( + ident, + "`crate` attribute is not allowed for fields", + )); + } + + if field.label.task { + return Err(syn::Error::new_spanned( + ident, + "`task` attribute is not allowed in `FromContext` macro", + )); + } + + let field = if default { + quote! { + #ident: ctx.get_resource_or_default::<#ty>() + } + } else { + quote! { + #ident: <#ty as #crate_path::service::FromContext>::from_context(ctx)? + } + }; + + fields.push(field) + } + + Ok(quote! { + impl #crate_path::FromContext for #ident { + fn from_context(ctx: &mut #crate_path::service::ServiceContext<'_>) -> std::result::Result<Self, #crate_path::WiringError> { + Ok(Self { + #(#fields),* + }) + } + } + }) + } + + fn render_into_context(self) -> Result<proc_macro2::TokenStream> { + let crate_path = self.crate_path(); + let ident = self.ident; + let mut actions = Vec::new(); + for field in self.fields { + let ty = field.ty; + let ident = field.ident; + if field.label.default { + return Err(syn::Error::new_spanned( + ident, + "`default` attribute is not allowed in `IntoContext` macro", + )); + } + + if field.label.krate.is_some() { + return Err(syn::Error::new_spanned( + ident, + "`crate` attribute is not allowed for fields", + )); + } + + let action = if field.label.task { + // Check whether the task is an `Option`. + if let Some(_inner_ty) = crate::helpers::extract_option_inner_type(&ty) { + quote! { + if let Some(task) = self.#ident { + ctx.add_task(task); + } + } + } else { + quote! { + ctx.add_task(self.#ident); + } + } + } else { + quote! { + <#ty as #crate_path::service::IntoContext>::into_context(self.#ident, ctx)?; + } + }; + actions.push(action); + } + + Ok(quote!
{ + impl #crate_path::IntoContext for #ident { + fn into_context(self, ctx: &mut #crate_path::service::ServiceContext<'_>) -> std::result::Result<(), #crate_path::WiringError> { + #(#actions)* + Ok(()) + } + } + }) + } +} diff --git a/core/lib/object_store/Cargo.toml b/core/lib/object_store/Cargo.toml index e400642bd2cd..1c75d6d0f922 100644 --- a/core/lib/object_store/Cargo.toml +++ b/core/lib/object_store/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_object_store" -version = "0.1.0" +description = "ZKsync implementation of object stores" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/lib/object_store/src/factory.rs b/core/lib/object_store/src/factory.rs index 0fa1329ad72c..af00a8193d7f 100644 --- a/core/lib/object_store/src/factory.rs +++ b/core/lib/object_store/src/factory.rs @@ -52,6 +52,11 @@ impl ObjectStoreFactory { .cloned() } + /// Creates an [`ObjectStore`] based on the provided `config`. + /// + /// # Errors + /// + /// Returns an error if store initialization fails (e.g., because of incorrect configuration). async fn create_from_config( config: &ObjectStoreConfig, ) -> Result<Arc<dyn ObjectStore>, ObjectStoreError> { diff --git a/core/lib/object_store/src/gcs.rs b/core/lib/object_store/src/gcs.rs index 2d4fae77ab80..451866236243 100644 --- a/core/lib/object_store/src/gcs.rs +++ b/core/lib/object_store/src/gcs.rs @@ -107,21 +107,22 @@ fn is_transient_http_error(err: &reqwest::Error) -> bool { || err.status() == Some(StatusCode::SERVICE_UNAVAILABLE) } -fn has_transient_io_source(mut err: &(dyn StdError + 'static)) -> bool { +fn get_source<'a, T: StdError + 'static>(mut err: &'a (dyn StdError + 'static)) -> Option<&'a T> { loop { - if err.is::<io::Error>() { - // We treat any I/O errors as transient. This isn't always true, but frequently occurring I/O errors - // (e.g., "connection reset by peer") *are* transient, and treating an error as transient is a safer option, - // even if it can lead to unnecessary retries. - return true; + if let Some(err) = err.downcast_ref::<T>() { + return Some(err); } - err = match err.source() { - Some(source) => source, - None => return false, - }; + err = err.source()?; } } +fn has_transient_io_source(err: &(dyn StdError + 'static)) -> bool { + // We treat any I/O errors as transient. This isn't always true, but frequently occurring I/O errors + // (e.g., "connection reset by peer") *are* transient, and treating an error as transient is a safer option, + // even if it can lead to unnecessary retries. + get_source::<io::Error>(err).is_some() +} + impl From<HttpError> for ObjectStoreError { fn from(err: HttpError) -> Self { let is_not_found = match &err { @@ -129,16 +130,23 @@ .status() .map_or(false, |status| matches!(status, StatusCode::NOT_FOUND)), HttpError::Response(response) => response.code == StatusCode::NOT_FOUND.as_u16(), - HttpError::TokenSource(_) => false, + _ => false, }; if is_not_found { ObjectStoreError::KeyNotFound(err.into()) } else { - let is_transient = matches!( - &err, - HttpError::HttpClient(err) if is_transient_http_error(err) - ); + let is_transient = match &err { + HttpError::HttpClient(err) => is_transient_http_error(err), + HttpError::TokenSource(err) => { + // Token sources are mostly based on the `reqwest` HTTP client, so transient error detection + // can reuse the same logic.
+ let err = err.as_ref(); + has_transient_io_source(err) + || get_source::<reqwest::Error>(err).is_some_and(is_transient_http_error) + } + _ => false, + }; ObjectStoreError::Other { is_transient, source: err.into(), diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index d67e4e5df137..897c93e0b6f8 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -87,11 +87,15 @@ impl StoredObject for SnapshotFactoryDependencies { } } -impl StoredObject for SnapshotStorageLogsChunk { +impl<K> StoredObject for SnapshotStorageLogsChunk<K> +where + Self: ProtoFmt, +{ const BUCKET: Bucket = Bucket::StorageSnapshot; type Key<'a> = SnapshotStorageLogsStorageKey; fn encode_key(key: Self::Key<'_>) -> String { + // FIXME: should keys be separated by version? format!( "snapshot_l1_batch_{}_storage_logs_part_{:0>4}.proto.gzip", key.l1_batch_number, key.chunk_id @@ -181,7 +185,7 @@ mod tests { use zksync_types::{ snapshots::{SnapshotFactoryDependency, SnapshotStorageLog}, web3::Bytes, - AccountTreeId, StorageKey, H160, H256, + H256, }; use super::*; @@ -189,15 +193,15 @@ mod tests { #[test] fn test_storage_logs_filesnames_generate_corretly() { - let filename1 = SnapshotStorageLogsChunk::encode_key(SnapshotStorageLogsStorageKey { + let filename1 = <SnapshotStorageLogsChunk>::encode_key(SnapshotStorageLogsStorageKey { l1_batch_number: L1BatchNumber(42), chunk_id: 97, }); - let filename2 = SnapshotStorageLogsChunk::encode_key(SnapshotStorageLogsStorageKey { + let filename2 = <SnapshotStorageLogsChunk>::encode_key(SnapshotStorageLogsStorageKey { l1_batch_number: L1BatchNumber(3), chunk_id: 531, }); - let filename3 = SnapshotStorageLogsChunk::encode_key(SnapshotStorageLogsStorageKey { + let filename3 = <SnapshotStorageLogsChunk>::encode_key(SnapshotStorageLogsStorageKey { l1_batch_number: L1BatchNumber(567), chunk_id: 5, }); @@ -225,13 +229,13 @@ mod tests { let storage_logs = SnapshotStorageLogsChunk { storage_logs: vec![ SnapshotStorageLog { - key: StorageKey::new(AccountTreeId::new(H160::random()), H256::random()), + key: H256::random(), value: H256::random(), l1_batch_number_of_initial_write: L1BatchNumber(123), enumeration_index: 234, }, SnapshotStorageLog { - key: StorageKey::new(AccountTreeId::new(H160::random()), H256::random()), + key: H256::random(), value: H256::random(), l1_batch_number_of_initial_write: L1BatchNumber(345), enumeration_index: 456, diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index 66cda57a0ab1..da1cd99728d9 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -18,6 +18,7 @@ pub enum Bucket { ProofsFri, ProofsTee, StorageSnapshot, + DataAvailability, TeeVerifierInput, } @@ -36,6 +37,7 @@ impl Bucket { Self::ProofsFri => "proofs_fri", Self::ProofsTee => "proofs_tee", Self::StorageSnapshot => "storage_logs_snapshots", + Self::DataAvailability => "data_availability", Self::TeeVerifierInput => "tee_verifier_inputs", } } diff --git a/core/lib/prometheus_exporter/Cargo.toml b/core/lib/prometheus_exporter/Cargo.toml deleted file mode 100644 index 3158aeb73da0..000000000000 --- a/core/lib/prometheus_exporter/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "prometheus_exporter" -version = "0.1.0" -edition.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -license.workspace = true -keywords.workspace = true -categories.workspace = true - -[dependencies] -anyhow.workspace = true -metrics.workspace = true -metrics-exporter-prometheus.workspace = true -tokio.workspace = true -vise.workspace =
true - -vise-exporter = { workspace = true, features = ["legacy"] } diff --git a/core/lib/prometheus_exporter/src/lib.rs b/core/lib/prometheus_exporter/src/lib.rs deleted file mode 100644 index 4eda0bebe0e6..000000000000 --- a/core/lib/prometheus_exporter/src/lib.rs +++ /dev/null @@ -1,142 +0,0 @@ -use std::{net::Ipv4Addr, time::Duration}; - -use anyhow::Context as _; -use metrics_exporter_prometheus::{Matcher, PrometheusBuilder}; -use tokio::sync::watch; -use vise::MetricsCollection; -use vise_exporter::MetricsExporter; - -fn configure_legacy_exporter(builder: PrometheusBuilder) -> PrometheusBuilder { - // in seconds - let default_latency_buckets = [0.001, 0.005, 0.025, 0.1, 0.25, 1.0, 5.0, 30.0, 120.0]; - let slow_latency_buckets = [ - 0.33, 1.0, 2.0, 5.0, 10.0, 30.0, 60.0, 180.0, 600.0, 1800.0, 3600.0, - ]; - let prover_buckets = [ - 1.0, 10.0, 20.0, 40.0, 60.0, 120.0, 240.0, 360.0, 600.0, 1800.0, 3600.0, - ]; - - builder - .set_buckets(&default_latency_buckets) - .unwrap() - .set_buckets_for_metric(Matcher::Prefix("server.prover".to_owned()), &prover_buckets) - .unwrap() - .set_buckets_for_metric( - Matcher::Prefix("server.witness_generator".to_owned()), - &slow_latency_buckets, - ) - .unwrap() -} - -#[derive(Debug)] -enum PrometheusTransport { - Pull { - port: u16, - }, - Push { - gateway_uri: String, - interval: Duration, - }, -} - -/// Configuration of a Prometheus exporter. -#[derive(Debug)] -pub struct PrometheusExporterConfig { - transport: PrometheusTransport, - use_new_facade: bool, -} - -impl PrometheusExporterConfig { - /// Creates an exporter that will run an HTTP server on the specified `port`. - pub const fn pull(port: u16) -> Self { - Self { - transport: PrometheusTransport::Pull { port }, - use_new_facade: true, - } - } - - /// Creates an exporter that will push metrics to the specified Prometheus gateway endpoint. - pub const fn push(gateway_uri: String, interval: Duration) -> Self { - Self { - transport: PrometheusTransport::Push { - gateway_uri, - interval, - }, - use_new_facade: true, - } - } - - /// Disables the new metrics façade (`vise`), which is on by default. - #[must_use] - pub fn without_new_facade(self) -> Self { - Self { - use_new_facade: false, - transport: self.transport, - } - } - - /// Runs the exporter. This future should be spawned in a separate Tokio task. 
- pub async fn run(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { - if self.use_new_facade { - self.run_with_new_facade(stop_receiver) - .await - .context("run_with_new_facade()") - } else { - self.run_without_new_facade() - .await - .context("run_without_new_facade()") - } - } - - async fn run_with_new_facade( - self, - mut stop_receiver: watch::Receiver, - ) -> anyhow::Result<()> { - let registry = MetricsCollection::lazy().collect(); - let metrics_exporter = MetricsExporter::new(registry.into()) - .with_legacy_exporter(configure_legacy_exporter) - .with_graceful_shutdown(async move { - stop_receiver.changed().await.ok(); - }); - - match self.transport { - PrometheusTransport::Pull { port } => { - let prom_bind_address = (Ipv4Addr::UNSPECIFIED, port).into(); - metrics_exporter - .start(prom_bind_address) - .await - .expect("Failed starting metrics server"); - } - PrometheusTransport::Push { - gateway_uri, - interval, - } => { - let endpoint = gateway_uri - .parse() - .context("Failed parsing Prometheus push gateway endpoint")?; - metrics_exporter.push_to_gateway(endpoint, interval).await; - } - } - Ok(()) - } - - async fn run_without_new_facade(self) -> anyhow::Result<()> { - let builder = match self.transport { - PrometheusTransport::Pull { port } => { - let prom_bind_address = (Ipv4Addr::UNSPECIFIED, port); - PrometheusBuilder::new().with_http_listener(prom_bind_address) - } - PrometheusTransport::Push { - gateway_uri, - interval, - } => PrometheusBuilder::new() - .with_push_gateway(gateway_uri, interval, None, None) - .context("PrometheusBuilder::with_push_gateway()")?, - }; - let builder = configure_legacy_exporter(builder); - let (recorder, exporter) = builder.build().context("PrometheusBuilder::build()")?; - metrics::set_boxed_recorder(Box::new(recorder)) - .context("failed to set metrics recorder")?; - exporter.await.context("Prometheus exporter failed") - } -} diff --git a/core/lib/protobuf_config/Cargo.toml b/core/lib/protobuf_config/Cargo.toml index ee52d8d5472f..453d5ab65f69 100644 --- a/core/lib/protobuf_config/Cargo.toml +++ b/core/lib/protobuf_config/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_protobuf_config" -version = "0.1.0" +description = "Protobuf deserialization for ZKsync configs" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/lib/protobuf_config/build.rs b/core/lib/protobuf_config/build.rs index 9a23d015239f..5705ed44c1d5 100644 --- a/core/lib/protobuf_config/build.rs +++ b/core/lib/protobuf_config/build.rs @@ -3,7 +3,7 @@ fn main() { zksync_protobuf_build::Config { input_root: "src/proto".into(), proto_root: "zksync".into(), - dependencies: vec![], + dependencies: vec!["::zksync_protobuf::proto".parse().unwrap()], protobuf_crate: "::zksync_protobuf".parse().unwrap(), is_public: true, } diff --git a/core/lib/protobuf_config/src/api.rs b/core/lib/protobuf_config/src/api.rs index fe0cfb3e0d6e..4eac849773f3 100644 --- a/core/lib/protobuf_config/src/api.rs +++ b/core/lib/protobuf_config/src/api.rs @@ -69,7 +69,11 @@ impl ProtoRepr for proto::Web3JsonRpc { }) .collect::>() .context("max_response_body_size_overrides")?; - + let api_namespaces = if self.api_namespaces.is_empty() { + None + } else { + Some(self.api_namespaces.clone()) + }; Ok(Self::Type { http_port: required(&self.http_port) .and_then(|p| Ok((*p).try_into()?)) @@ -154,6 +158,8 @@ impl ProtoRepr for proto::Web3JsonRpc { .map(|(i, k)| parse_h160(k).context(i)) .collect::, _>>() .context("account_pks")?, + 
extended_api_tracing: self.extended_api_tracing.unwrap_or_default(), + api_namespaces, }) } @@ -222,6 +228,8 @@ impl ProtoRepr for proto::Web3JsonRpc { .iter() .map(|k| format!("{:?}", k)) .collect(), + extended_api_tracing: Some(this.extended_api_tracing), + api_namespaces: this.api_namespaces.clone().unwrap_or_default(), } } } diff --git a/core/lib/protobuf_config/src/base_token_adjuster.rs b/core/lib/protobuf_config/src/base_token_adjuster.rs new file mode 100644 index 000000000000..850acb4bae20 --- /dev/null +++ b/core/lib/protobuf_config/src/base_token_adjuster.rs @@ -0,0 +1,27 @@ +use zksync_config::configs::{self}; +use zksync_protobuf::ProtoRepr; + +use crate::proto::base_token_adjuster as proto; + +impl ProtoRepr for proto::BaseTokenAdjuster { + type Type = configs::base_token_adjuster::BaseTokenAdjusterConfig; + + fn read(&self) -> anyhow::Result { + Ok(configs::base_token_adjuster::BaseTokenAdjusterConfig { + price_polling_interval_ms: self + .price_polling_interval_ms + .expect("price_polling_interval_ms"), + + price_cache_update_interval_ms: self + .price_cache_update_interval_ms + .expect("price_cache_update_interval_ms"), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + price_polling_interval_ms: Some(this.price_polling_interval_ms), + price_cache_update_interval_ms: Some(this.price_cache_update_interval_ms), + } + } +} diff --git a/core/lib/protobuf_config/src/chain.rs b/core/lib/protobuf_config/src/chain.rs index 7b1d9f532fd7..fafecc0131cd 100644 --- a/core/lib/protobuf_config/src/chain.rs +++ b/core/lib/protobuf_config/src/chain.rs @@ -78,6 +78,10 @@ impl ProtoRepr for proto::StateKeeper { max_circuits_per_batch: required(&self.max_circuits_per_batch) .and_then(|x| Ok((*x).try_into()?)) .context("max_circuits_per_batch")?, + protective_reads_persistence_enabled: *required( + &self.protective_reads_persistence_enabled, + ) + .context("protective_reads_persistence_enabled")?, // We need these values only for instantiating configs from environmental variables, so it's not // needed during the initialization from files @@ -115,6 +119,7 @@ impl ProtoRepr for proto::StateKeeper { validation_computational_gas_limit: Some(this.validation_computational_gas_limit), save_call_traces: Some(this.save_call_traces), max_circuits_per_batch: Some(this.max_circuits_per_batch.try_into().unwrap()), + protective_reads_persistence_enabled: Some(this.protective_reads_persistence_enabled), } } } diff --git a/core/lib/protobuf_config/src/commitment_generator.rs b/core/lib/protobuf_config/src/commitment_generator.rs new file mode 100644 index 000000000000..23af3ccce76e --- /dev/null +++ b/core/lib/protobuf_config/src/commitment_generator.rs @@ -0,0 +1,24 @@ +use std::num::NonZeroU32; + +use anyhow::Context as _; +use zksync_config::configs::CommitmentGeneratorConfig; +use zksync_protobuf::{repr::ProtoRepr, required}; + +use crate::proto::commitment_generator as proto; + +impl ProtoRepr for proto::CommitmentGenerator { + type Type = CommitmentGeneratorConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + max_parallelism: NonZeroU32::new( + *required(&self.max_parallelism).context("max_parallelism")?, + ) + .context("cannot be 0")?, + }) + } + fn build(this: &Self::Type) -> Self { + Self { + max_parallelism: Some(this.max_parallelism.into()), + } + } +} diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index 428333f450c6..a659a6f16abc 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ 
b/core/lib/protobuf_config/src/consensus.rs @@ -1,10 +1,10 @@ use anyhow::Context as _; use zksync_basic_types::L2ChainId; use zksync_config::configs::consensus::{ - ConsensusConfig, GenesisSpec, Host, NodePublicKey, ProtocolVersion, ValidatorPublicKey, - WeightedValidator, + AttesterPublicKey, ConsensusConfig, GenesisSpec, Host, NodePublicKey, ProtocolVersion, + RpcConfig, ValidatorPublicKey, WeightedAttester, WeightedValidator, }; -use zksync_protobuf::{repr::ProtoRepr, required}; +use zksync_protobuf::{kB, read_optional, repr::ProtoRepr, required, ProtoFmt}; use crate::{proto::consensus as proto, read_optional_repr}; @@ -24,6 +24,22 @@ impl ProtoRepr for proto::WeightedValidator { } } +impl ProtoRepr for proto::WeightedAttester { + type Type = WeightedAttester; + fn read(&self) -> anyhow::Result<Self::Type> { + Ok(Self::Type { + key: AttesterPublicKey(required(&self.key).context("key")?.clone()), + weight: *required(&self.weight).context("weight")?, + }) + } + fn build(this: &Self::Type) -> Self { + Self { + key: Some(this.key.0.clone()), + weight: Some(this.weight), + } + } +} + impl ProtoRepr for proto::GenesisSpec { type Type = GenesisSpec; fn read(&self) -> anyhow::Result<Self::Type> { @@ -41,6 +57,13 @@ impl ProtoRepr for proto::GenesisSpec { .map(|(i, x)| x.read().context(i)) .collect::<anyhow::Result<_>>() .context("validators")?, + attesters: self + .attesters + .iter() + .enumerate() + .map(|(i, x)| x.read().context(i)) + .collect::<anyhow::Result<_>>() + .context("attesters")?, leader: ValidatorPublicKey(required(&self.leader).context("leader")?.clone()), }) } @@ -49,11 +72,26 @@ impl ProtoRepr for proto::GenesisSpec { chain_id: Some(this.chain_id.as_u64()), protocol_version: Some(this.protocol_version.0), validators: this.validators.iter().map(ProtoRepr::build).collect(), + attesters: this.attesters.iter().map(ProtoRepr::build).collect(), leader: Some(this.leader.0.clone()), } } } +impl ProtoRepr for proto::RpcConfig { + type Type = RpcConfig; + fn read(&self) -> anyhow::Result<Self::Type> { + Ok(Self::Type { + get_block_rate: read_optional(&self.get_block_rate).context("get_block_rate")?, + }) + } + fn build(this: &Self::Type) -> Self { + Self { + get_block_rate: this.get_block_rate.as_ref().map(ProtoFmt::build), + } + } +} + impl ProtoRepr for proto::Config { type Type = ConsensusConfig; fn read(&self) -> anyhow::Result<Self::Type> { @@ -62,14 +100,31 @@ impl ProtoRepr for proto::Config { let addr = Host(required(&e.addr).context("addr")?.clone()); anyhow::Ok((key, addr)) }; + + let max_payload_size = required(&self.max_payload_size) + .and_then(|x| Ok((*x).try_into()?)) + .context("max_payload_size")?; + + let max_batch_size = match self.max_batch_size { + Some(x) => x.try_into().context("max_batch_size")?, + None => { + // Compute a default batch size, so operators are not caught out by the missing setting + // while we're still working on batch syncing. The batch interval is ~1 minute, + // so there will be ~60 blocks, and an Ethereum Merkle proof is ~1kB, but under high + // traffic there can be thousands of huge transactions that quickly fill up blocks + // and there could be more blocks in a batch than expected. We chose a generous + // limit so as not to prevent any legitimate batch from being transmitted.
+ max_payload_size * 5000 + kB + } + }; + Ok(Self::Type { server_addr: required(&self.server_addr) .and_then(|x| Ok(x.parse()?)) .context("server_addr")?, public_addr: Host(required(&self.public_addr).context("public_addr")?.clone()), - max_payload_size: required(&self.max_payload_size) - .and_then(|x| Ok((*x).try_into()?)) - .context("max_payload_size")?, + max_payload_size, + max_batch_size, gossip_dynamic_inbound_limit: required(&self.gossip_dynamic_inbound_limit) .and_then(|x| Ok((*x).try_into()?)) .context("gossip_dynamic_inbound_limit")?, @@ -85,6 +140,7 @@ impl ProtoRepr for proto::Config { .map(|(i, e)| read_addr(e).context(i)) .collect::>()?, genesis_spec: read_optional_repr(&self.genesis_spec).context("genesis_spec")?, + rpc: read_optional_repr(&self.rpc_config).context("rpc_config")?, }) } @@ -93,6 +149,7 @@ impl ProtoRepr for proto::Config { server_addr: Some(this.server_addr.to_string()), public_addr: Some(this.public_addr.0.clone()), max_payload_size: Some(this.max_payload_size.try_into().unwrap()), + max_batch_size: Some(this.max_batch_size.try_into().unwrap()), gossip_dynamic_inbound_limit: Some( this.gossip_dynamic_inbound_limit.try_into().unwrap(), ), @@ -110,6 +167,7 @@ impl ProtoRepr for proto::Config { }) .collect(), genesis_spec: this.genesis_spec.as_ref().map(ProtoRepr::build), + rpc_config: this.rpc.as_ref().map(ProtoRepr::build), } } } diff --git a/core/lib/protobuf_config/src/contracts.rs b/core/lib/protobuf_config/src/contracts.rs index ac1864b7a0bd..84c404367503 100644 --- a/core/lib/protobuf_config/src/contracts.rs +++ b/core/lib/protobuf_config/src/contracts.rs @@ -101,6 +101,12 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("base_token_addr")?, + chain_admin_addr: l1 + .chain_admin_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("chain_admin_addr")?, }) } @@ -132,6 +138,7 @@ impl ProtoRepr for proto::Contracts { default_upgrade_addr: Some(format!("{:?}", this.default_upgrade_addr)), multicall3_addr: Some(format!("{:?}", this.l1_multicall3_addr)), base_token_addr: this.base_token_addr.map(|a| format!("{:?}", a)), + chain_admin_addr: this.chain_admin_addr.map(|a| format!("{:?}", a)), }), l2: Some(proto::L2 { testnet_paymaster_addr: this.l2_testnet_paymaster_addr.map(|a| format!("{:?}", a)), diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs new file mode 100644 index 000000000000..1cafa37a1e19 --- /dev/null +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -0,0 +1,24 @@ +use zksync_config::configs::{self}; +use zksync_protobuf::ProtoRepr; + +use crate::proto::da_dispatcher as proto; + +impl ProtoRepr for proto::DataAvailabilityDispatcher { + type Type = configs::da_dispatcher::DADispatcherConfig; + + fn read(&self) -> anyhow::Result { + Ok(configs::da_dispatcher::DADispatcherConfig { + polling_interval_ms: self.polling_interval_ms, + max_rows_to_dispatch: self.max_rows_to_dispatch, + max_retries: self.max_retries.map(|x| x as u16), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + polling_interval_ms: this.polling_interval_ms, + max_rows_to_dispatch: this.max_rows_to_dispatch, + max_retries: this.max_retries.map(Into::into), + } + } +} diff --git a/core/lib/protobuf_config/src/en.rs b/core/lib/protobuf_config/src/en.rs new file mode 100644 index 000000000000..b6323de6ea61 --- /dev/null +++ b/core/lib/protobuf_config/src/en.rs @@ -0,0 +1,50 @@ +use std::{num::NonZeroUsize, str::FromStr}; + +use anyhow::Context; +use 
zksync_basic_types::{url::SensitiveUrl, L1ChainId, L2ChainId}; +use zksync_config::configs::en_config::ENConfig; +use zksync_protobuf::{required, ProtoRepr}; + +use crate::proto::en as proto; + +impl ProtoRepr for proto::ExternalNode { + type Type = ENConfig; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + main_node_url: SensitiveUrl::from_str( + required(&self.main_node_url).context("main_node_url")?, + )?, + l1_chain_id: required(&self.l1_chain_id) + .map(|x| L1ChainId(*x)) + .context("l1_chain_id")?, + l2_chain_id: required(&self.l2_chain_id) + .and_then(|x| L2ChainId::try_from(*x).map_err(|a| anyhow::anyhow!(a))) + .context("l2_chain_id")?, + l1_batch_commit_data_generator_mode: required( + &self.l1_batch_commit_data_generator_mode, + ) + .and_then(|x| Ok(crate::proto::genesis::L1BatchCommitDataGeneratorMode::try_from(*x)?)) + .context("l1_batch_commit_data_generator_mode")? + .parse(), + main_node_rate_limit_rps: self + .main_node_rate_limit_rps + .and_then(|a| NonZeroUsize::new(a as usize)), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + main_node_url: Some(this.main_node_url.expose_str().to_string()), + l1_chain_id: Some(this.l1_chain_id.0), + l2_chain_id: Some(this.l2_chain_id.as_u64()), + l1_batch_commit_data_generator_mode: Some( + crate::proto::genesis::L1BatchCommitDataGeneratorMode::new( + &this.l1_batch_commit_data_generator_mode, + ) + .into(), + ), + main_node_rate_limit_rps: this.main_node_rate_limit_rps.map(|a| a.get() as u64), + } + } +} diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index 4ed5a8841436..90807f7dafa3 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -30,6 +30,7 @@ impl proto::PubdataSendingMode { match x { From::Calldata => Self::Calldata, From::Blobs => Self::Blobs, + From::Custom => Self::Custom, } } @@ -38,6 +39,7 @@ impl proto::PubdataSendingMode { match self { Self::Calldata => To::Calldata, Self::Blobs => To::Blobs, + Self::Custom => To::Custom, } } } diff --git a/core/lib/protobuf_config/src/experimental.rs b/core/lib/protobuf_config/src/experimental.rs index c4fe17aadf43..8d92f3ef87a8 100644 --- a/core/lib/protobuf_config/src/experimental.rs +++ b/core/lib/protobuf_config/src/experimental.rs @@ -21,6 +21,13 @@ impl ProtoRepr for proto::Db { .map(|count| NonZeroU32::new(count).context("cannot be 0")) .transpose() .context("state_keeper_db_max_open_files")?, + protective_reads_persistence_enabled: self + .reads_persistence_enabled + .unwrap_or_default(), + processing_delay_ms: self.processing_delay_ms.unwrap_or_default(), + include_indices_and_filters_in_block_cache: self + .include_indices_and_filters_in_block_cache + .unwrap_or_default(), }) } @@ -34,6 +41,11 @@ impl ProtoRepr for proto::Db { state_keeper_db_max_open_files: this .state_keeper_db_max_open_files .map(NonZeroU32::get), + reads_persistence_enabled: Some(this.protective_reads_persistence_enabled), + processing_delay_ms: Some(this.processing_delay_ms), + include_indices_and_filters_in_block_cache: Some( + this.include_indices_and_filters_in_block_cache, + ), } } } diff --git a/core/lib/protobuf_config/src/external_price_api_client.rs b/core/lib/protobuf_config/src/external_price_api_client.rs new file mode 100644 index 000000000000..cd16957d55ad --- /dev/null +++ b/core/lib/protobuf_config/src/external_price_api_client.rs @@ -0,0 +1,32 @@ +use zksync_config::configs::{self}; +use zksync_protobuf::ProtoRepr; + +use crate::proto::external_price_api_client as proto; + +impl 
ProtoRepr for proto::ExternalPriceApiClient { + type Type = configs::external_price_api_client::ExternalPriceApiClientConfig; + + fn read(&self) -> anyhow::Result { + Ok( + configs::external_price_api_client::ExternalPriceApiClientConfig { + source: self.source.clone().expect("source"), + client_timeout_ms: self.client_timeout_ms.expect("client_timeout_ms"), + base_url: self.base_url.clone(), + api_key: self.api_key.clone(), + forced_numerator: self.forced_numerator, + forced_denominator: self.forced_denominator, + }, + ) + } + + fn build(this: &Self::Type) -> Self { + Self { + source: Some(this.source.clone()), + base_url: this.base_url.clone(), + api_key: this.api_key.clone(), + client_timeout_ms: Some(this.client_timeout_ms), + forced_numerator: this.forced_numerator, + forced_denominator: this.forced_denominator, + } + } +} diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs index 834977759ae2..31d1ea6bc1b7 100644 --- a/core/lib/protobuf_config/src/general.rs +++ b/core/lib/protobuf_config/src/general.rs @@ -37,10 +37,26 @@ impl ProtoRepr for proto::GeneralConfig { snapshot_creator: read_optional_repr(&self.snapshot_creator) .context("snapshot_creator")?, observability: read_optional_repr(&self.observability).context("observability")?, + da_dispatcher_config: read_optional_repr(&self.da_dispatcher) + .context("da_dispatcher")?, protective_reads_writer_config: read_optional_repr(&self.protective_reads_writer) .context("protective_reads_writer")?, + basic_witness_input_producer_config: read_optional_repr( + &self.basic_witness_input_producer, + ) + .context("basic_witness_input_producer")?, core_object_store: read_optional_repr(&self.core_object_store) .context("core_object_store")?, + base_token_adjuster: read_optional_repr(&self.base_token_adjuster) + .context("base_token_adjuster")?, + commitment_generator: read_optional_repr(&self.commitment_generator) + .context("commitment_generator")?, + pruning: read_optional_repr(&self.pruning).context("pruning")?, + snapshot_recovery: read_optional_repr(&self.snapshot_recovery) + .context("snapshot_recovery")?, + external_price_api_client_config: read_optional_repr(&self.external_price_api_client) + .context("external_price_api_client")?, + consensus_config: read_optional_repr(&self.consensus).context("consensus")?, }) } @@ -72,11 +88,25 @@ impl ProtoRepr for proto::GeneralConfig { eth: this.eth.as_ref().map(ProtoRepr::build), snapshot_creator: this.snapshot_creator.as_ref().map(ProtoRepr::build), observability: this.observability.as_ref().map(ProtoRepr::build), + da_dispatcher: this.da_dispatcher_config.as_ref().map(ProtoRepr::build), protective_reads_writer: this .protective_reads_writer_config .as_ref() .map(ProtoRepr::build), + basic_witness_input_producer: this + .basic_witness_input_producer_config + .as_ref() + .map(ProtoRepr::build), + commitment_generator: this.commitment_generator.as_ref().map(ProtoRepr::build), + snapshot_recovery: this.snapshot_recovery.as_ref().map(ProtoRepr::build), + pruning: this.pruning.as_ref().map(ProtoRepr::build), core_object_store: this.core_object_store.as_ref().map(ProtoRepr::build), + base_token_adjuster: this.base_token_adjuster.as_ref().map(ProtoRepr::build), + external_price_api_client: this + .external_price_api_client_config + .as_ref() + .map(ProtoRepr::build), + consensus: this.consensus_config.as_ref().map(ProtoRepr::build), } } } diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs index 
9cab754150d2..52045ed9dbed 100644
--- a/core/lib/protobuf_config/src/genesis.rs
+++ b/core/lib/protobuf_config/src/genesis.rs
@@ -11,20 +11,21 @@ use zksync_protobuf::{repr::ProtoRepr, required};
 use crate::{parse_h160, parse_h256, proto::genesis as proto};
 
 impl proto::L1BatchCommitDataGeneratorMode {
-    fn new(n: &L1BatchCommitmentMode) -> Self {
+    pub(crate) fn new(n: &L1BatchCommitmentMode) -> Self {
         match n {
             L1BatchCommitmentMode::Rollup => Self::Rollup,
             L1BatchCommitmentMode::Validium => Self::Validium,
         }
     }
 
-    fn parse(&self) -> L1BatchCommitmentMode {
+    pub(crate) fn parse(&self) -> L1BatchCommitmentMode {
         match self {
             Self::Rollup => L1BatchCommitmentMode::Rollup,
             Self::Validium => L1BatchCommitmentMode::Validium,
         }
     }
 }
+
 impl ProtoRepr for proto::Genesis {
     type Type = configs::GenesisConfig;
     fn read(&self) -> anyhow::Result<Self::Type> {
diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs
index 2fd9bbd9e059..839f3e3cf8ca 100644
--- a/core/lib/protobuf_config/src/lib.rs
+++ b/core/lib/protobuf_config/src/lib.rs
@@ -5,12 +5,16 @@
 //! * protobuf json format
 
 mod api;
+mod base_token_adjuster;
 mod chain;
 mod circuit_breaker;
+mod commitment_generator;
 mod consensus;
 mod contract_verifier;
 mod contracts;
+mod da_dispatcher;
 mod database;
+mod en;
 mod eth;
 mod experimental;
 mod general;
@@ -21,18 +25,22 @@ mod observability;
 mod proof_data_handler;
 pub mod proto;
 mod prover;
+mod pruning;
 mod secrets;
 mod snapshots_creator;
-pub mod testonly;
+
+mod external_price_api_client;
+mod snapshot_recovery;
 #[cfg(test)]
 mod tests;
 mod utils;
 mod vm_runner;
 mod wallets;
 
-use std::str::FromStr;
+use std::{path::PathBuf, str::FromStr};
 
-use zksync_protobuf::ProtoRepr;
+use anyhow::Context;
+use zksync_protobuf::{serde::serialize_proto, ProtoRepr};
 use zksync_types::{H160, H256};
 
 fn parse_h256(bytes: &str) -> anyhow::Result<H256> {
@@ -46,3 +54,20 @@ fn parse_h160(bytes: &str) -> anyhow::Result<H160> {
 pub fn read_optional_repr<P: ProtoRepr>(field: &Option<P>) -> anyhow::Result<Option<P::Type>> {
     field.as_ref().map(|x| x.read()).transpose()
 }
+
+pub fn decode_yaml_repr<T: ProtoRepr>(
+    path: &PathBuf,
+    deny_unknown_fields: bool,
+) -> anyhow::Result<T::Type> {
+    let yaml = std::fs::read_to_string(path).with_context(|| path.display().to_string())?;
+    let d = serde_yaml::Deserializer::from_str(&yaml);
+    let this: T = zksync_protobuf::serde::deserialize_proto_with_options(d, deny_unknown_fields)?;
+    this.read()
+}
+
+pub fn encode_yaml_repr<T: ProtoRepr>(value: &T::Type) -> anyhow::Result<Vec<u8>> {
+    let mut buffer = vec![];
+    let mut s = serde_yaml::Serializer::new(&mut buffer);
+    serialize_proto(&T::build(value), &mut s)?;
+    Ok(buffer)
+}
diff --git a/core/lib/protobuf_config/src/proto/config/api.proto b/core/lib/protobuf_config/src/proto/config/api.proto
index 09503056a3f1..4fea0691f79d 100644
--- a/core/lib/protobuf_config/src/proto/config/api.proto
+++ b/core/lib/protobuf_config/src/proto/config/api.proto
@@ -40,7 +40,8 @@ message Web3JsonRpc {
   optional uint64 mempool_cache_size = 29; // optional
   repeated string whitelisted_tokens_for_aa = 30; // optional
   repeated MaxResponseSizeOverride max_response_body_size_overrides = 31;
-
+  repeated string api_namespaces = 32; // optional; if empty, all namespaces are available
+  optional bool extended_api_tracing = 33; // optional; defaults to false
  reserved 15; reserved "l1_to_l2_transactions_compatibility_mode";
 }
diff --git a/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto b/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto
new file mode 100644
index 000000000000..f3adad8707b5
--- /dev/null
+++ b/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto
@@ -0,0 +1,8 @@
+syntax = "proto3";
+
+package zksync.config.base_token_adjuster;
+
+message BaseTokenAdjuster {
+  optional uint64 price_polling_interval_ms = 1;
+  optional uint64 price_cache_update_interval_ms = 2;
+}
diff --git a/core/lib/protobuf_config/src/proto/config/chain.proto b/core/lib/protobuf_config/src/proto/config/chain.proto
index c04f41ca4751..258d6d1d6d4c 100644
--- a/core/lib/protobuf_config/src/proto/config/chain.proto
+++ b/core/lib/protobuf_config/src/proto/config/chain.proto
@@ -8,7 +8,6 @@ enum FeeModelVersion {
   V2 = 1;
 }
 
-
 message StateKeeper {
   optional uint64 transaction_slots = 1; // required
   optional uint64 block_commit_deadline_ms = 2; // required; ms
@@ -33,6 +32,7 @@ message StateKeeper {
   optional bool save_call_traces = 22; // required
   optional uint64 max_circuits_per_batch = 27; // required
   optional uint64 miniblock_max_payload_size = 28; // required
+  optional bool protective_reads_persistence_enabled = 29; // optional
   reserved 23; reserved "virtual_blocks_interval";
   reserved 24; reserved "virtual_blocks_per_miniblock";
   reserved 26; reserved "enum_index_migration_chunk_size";
diff --git a/core/lib/protobuf_config/src/proto/config/commitment_generator.proto b/core/lib/protobuf_config/src/proto/config/commitment_generator.proto
new file mode 100644
index 000000000000..62b9566e1866
--- /dev/null
+++ b/core/lib/protobuf_config/src/proto/config/commitment_generator.proto
@@ -0,0 +1,7 @@
+syntax = "proto3";
+
+package zksync.config.commitment_generator;
+
+message CommitmentGenerator {
+  optional uint32 max_parallelism = 1;
+}
diff --git a/core/lib/protobuf_config/src/proto/config/contracts.proto b/core/lib/protobuf_config/src/proto/config/contracts.proto
index 7a9c92c08157..f4488c7901a1 100644
--- a/core/lib/protobuf_config/src/proto/config/contracts.proto
+++ b/core/lib/protobuf_config/src/proto/config/contracts.proto
@@ -16,6 +16,7 @@
message L1 { optional string default_upgrade_addr = 5; // required; H160 optional string multicall3_addr = 6; // required; H160 optional string base_token_addr = 7; // required; H160 + optional string chain_admin_addr = 8; // required; H160 } message L2 { diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto new file mode 100644 index 000000000000..d1d913498a4e --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package zksync.config.da_dispatcher; + +import "zksync/config/object_store.proto"; + +message DataAvailabilityDispatcher { + optional uint32 polling_interval_ms = 1; + optional uint32 max_rows_to_dispatch = 2; + optional uint32 max_retries = 3; +} diff --git a/core/lib/protobuf_config/src/proto/config/en.proto b/core/lib/protobuf_config/src/proto/config/en.proto new file mode 100644 index 000000000000..b0ec165b2f61 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/en.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; +import "zksync/config/genesis.proto"; + +package zksync.config.en; + +message ExternalNode { + optional string main_node_url = 1; // required + optional uint64 l2_chain_id = 2; // required + optional uint64 l1_chain_id = 3; // required + optional uint64 main_node_rate_limit_rps = 6; // optional + optional config.genesis.L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 7; // optional, default to rollup +} diff --git a/core/lib/protobuf_config/src/proto/config/eth_sender.proto b/core/lib/protobuf_config/src/proto/config/eth_sender.proto index 1eb15f0679a4..839c7f65b973 100644 --- a/core/lib/protobuf_config/src/proto/config/eth_sender.proto +++ b/core/lib/protobuf_config/src/proto/config/eth_sender.proto @@ -23,6 +23,7 @@ enum ProofLoadingMode { enum PubdataSendingMode { CALLDATA = 0; BLOBS = 1; + CUSTOM = 2; } message Sender { diff --git a/core/lib/protobuf_config/src/proto/config/experimental.proto b/core/lib/protobuf_config/src/proto/config/experimental.proto index 4f456b9aca39..1336c4719d26 100644 --- a/core/lib/protobuf_config/src/proto/config/experimental.proto +++ b/core/lib/protobuf_config/src/proto/config/experimental.proto @@ -8,4 +8,13 @@ package zksync.config.experimental; message DB { optional uint64 state_keeper_db_block_cache_capacity_mb = 1; // MB; required optional uint32 state_keeper_db_max_open_files = 2; // optional + optional bool reads_persistence_enabled = 3; + optional uint64 processing_delay_ms = 4; + optional bool include_indices_and_filters_in_block_cache = 5; +} + +// Experimental part of the Snapshot recovery configuration. 
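+// These knobs are consumed via the `experimental` field of
+// `zksync.config.snapshot_recovery.SnapshotRecovery` (see `snapshot_recovery.proto`).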
+message SnapshotRecovery { + optional uint64 tree_recovery_parallel_persistence_buffer = 1; + optional bool drop_storage_key_preimages = 2; // optional; false by default } diff --git a/core/lib/protobuf_config/src/proto/config/external_price_api_client.proto b/core/lib/protobuf_config/src/proto/config/external_price_api_client.proto new file mode 100644 index 000000000000..f47e35782e60 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/external_price_api_client.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package zksync.config.external_price_api_client; + +message ExternalPriceApiClient { + optional string source = 1; + optional string base_url = 2; + optional string api_key = 3; + optional uint64 client_timeout_ms = 4; + optional uint64 forced_numerator = 5; + optional uint64 forced_denominator = 6; +} diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index fdfe257aecf1..37d507b9ab62 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -13,30 +13,45 @@ import "zksync/config/house_keeper.proto"; import "zksync/config/observability.proto"; import "zksync/config/snapshots_creator.proto"; import "zksync/config/utils.proto"; +import "zksync/config/da_dispatcher.proto"; import "zksync/config/vm_runner.proto"; +import "zksync/config/commitment_generator.proto"; +import "zksync/config/snapshot_recovery.proto"; +import "zksync/config/pruning.proto"; import "zksync/config/object_store.proto"; +import "zksync/config/base_token_adjuster.proto"; +import "zksync/config/external_price_api_client.proto"; +import "zksync/core/consensus.proto"; message GeneralConfig { - optional config.database.Postgres postgres = 1; - optional config.api.Api api = 2; - optional config.contract_verifier.ContractVerifier contract_verifier = 3; - optional config.circuit_breaker.CircuitBreaker circuit_breaker = 5; - optional config.chain.Mempool mempool = 6; - optional config.chain.OperationsManager operations_manager = 8; - optional config.chain.StateKeeper state_keeper = 9; - optional config.house_keeper.HouseKeeper house_keeper = 10; - optional config.prover.Prover prover = 12; - optional config.utils.Prometheus prometheus = 15; - optional config.database.DB db = 20; - optional config.eth.ETH eth = 22; - optional config.prover.WitnessGenerator witness_generator = 24; - optional config.prover.WitnessVectorGenerator witness_vector_generator = 25; - optional config.prover.ProofCompressor proof_compressor = 27; - optional config.prover.ProofDataHandler data_handler = 28; - optional config.prover.ProverGroup prover_group = 29; - optional config.prover.ProverGateway prover_gateway = 30; - optional config.snapshot_creator.SnapshotsCreator snapshot_creator = 31; - optional config.observability.Observability observability = 32; - optional config.vm_runner.ProtectiveReadsWriter protective_reads_writer = 33; - optional config.object_store.ObjectStore core_object_store = 34; + optional database.Postgres postgres = 1; + optional api.Api api = 2; + optional contract_verifier.ContractVerifier contract_verifier = 3; + optional circuit_breaker.CircuitBreaker circuit_breaker = 5; + optional chain.Mempool mempool = 6; + optional chain.OperationsManager operations_manager = 8; + optional chain.StateKeeper state_keeper = 9; + optional house_keeper.HouseKeeper house_keeper = 10; + optional prover.Prover prover = 12; + optional utils.Prometheus prometheus = 15; + optional 
database.DB db = 20; + optional eth.ETH eth = 22; + optional prover.WitnessGenerator witness_generator = 24; + optional prover.WitnessVectorGenerator witness_vector_generator = 25; + optional prover.ProofCompressor proof_compressor = 27; + optional prover.ProofDataHandler data_handler = 28; + optional prover.ProverGroup prover_group = 29; + optional prover.ProverGateway prover_gateway = 30; + optional snapshot_creator.SnapshotsCreator snapshot_creator = 31; + optional observability.Observability observability = 32; + optional vm_runner.ProtectiveReadsWriter protective_reads_writer = 33; + optional object_store.ObjectStore core_object_store = 34; + optional snapshot_recovery.SnapshotRecovery snapshot_recovery = 35; + optional pruning.Pruning pruning = 36; + optional commitment_generator.CommitmentGenerator commitment_generator = 37; + optional da_dispatcher.DataAvailabilityDispatcher da_dispatcher = 38; + optional base_token_adjuster.BaseTokenAdjuster base_token_adjuster = 39; + optional vm_runner.BasicWitnessInputProducer basic_witness_input_producer = 40; + optional external_price_api_client.ExternalPriceApiClient external_price_api_client = 41; + optional core.consensus.Config consensus = 42; } diff --git a/core/lib/protobuf_config/src/proto/config/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto index 1eaf8637522a..80d45f40bbcb 100644 --- a/core/lib/protobuf_config/src/proto/config/prover.proto +++ b/core/lib/protobuf_config/src/proto/config/prover.proto @@ -21,6 +21,11 @@ enum SetupLoadMode { FROM_MEMORY = 1; } +enum CloudType { + GCP = 0; + LOCAL = 1; +} + message Prover { optional string setup_data_path = 1; // required; fs path? optional uint32 prometheus_port = 2; // required; u16 @@ -35,6 +40,7 @@ message Prover { optional bool shall_save_to_public_bucket = 13; // required optional config.object_store.ObjectStore public_object_store = 22; optional config.object_store.ObjectStore prover_object_store = 23; + optional CloudType cloud_type = 24; // optional reserved 5, 6, 9; reserved "base_layer_circuit_ids_to_be_verified", "recursive_layer_circuit_ids_to_be_verified", "witness_vector_generator_thread_count"; } @@ -81,6 +87,7 @@ message WitnessGenerator { optional uint32 node_generation_timeout_in_secs = 10; // optional; optional uint32 scheduler_generation_timeout_in_secs = 11; // optional; optional uint32 recursion_tip_timeout_in_secs = 12; // optional; + optional uint32 prometheus_listener_port = 13; // optional; reserved 3, 4, 6; reserved "dump_arguments_for_blocks", "force_process_block", "blocks_proving_percentage"; } diff --git a/core/lib/protobuf_config/src/proto/config/pruning.proto b/core/lib/protobuf_config/src/proto/config/pruning.proto new file mode 100644 index 000000000000..351f353bf060 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/pruning.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package zksync.config.pruning; + +message Pruning { + optional bool enabled = 1; + optional uint32 chunk_size = 2; + optional uint64 removal_delay_sec = 3; + optional uint64 data_retention_sec = 4; +} diff --git a/core/lib/protobuf_config/src/proto/config/secrets.proto b/core/lib/protobuf_config/src/proto/config/secrets.proto index fb328883f99d..b711d81d5754 100644 --- a/core/lib/protobuf_config/src/proto/config/secrets.proto +++ b/core/lib/protobuf_config/src/proto/config/secrets.proto @@ -16,6 +16,7 @@ message L1Secrets { message ConsensusSecrets { optional string validator_key = 1; // required for validator nodes; ValidatorSecretKey optional string 
node_key = 2; // required for any node; NodeSecretKey + optional string attester_key = 3; // required for attester nodes; AttesterSecretKey } message Secrets { diff --git a/core/lib/protobuf_config/src/proto/config/snapshot_recovery.proto b/core/lib/protobuf_config/src/proto/config/snapshot_recovery.proto new file mode 100644 index 000000000000..9eceda12ad86 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/snapshot_recovery.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; +import "zksync/config/object_store.proto"; +import "zksync/config/experimental.proto"; + +package zksync.config.snapshot_recovery; + +message Tree { + optional uint64 chunk_size = 1; +} + +message Postgres { + optional uint64 max_concurrency = 1; +} + +message SnapshotRecovery { + optional bool enabled = 1; + optional Postgres postgres = 2; + optional Tree tree = 3; + optional uint32 l1_batch = 4; + optional config.object_store.ObjectStore object_store = 5; + optional experimental.SnapshotRecovery experimental = 6; +} diff --git a/core/lib/protobuf_config/src/proto/config/snapshots_creator.proto b/core/lib/protobuf_config/src/proto/config/snapshots_creator.proto index 7aaa39a57f62..3846d86d6291 100644 --- a/core/lib/protobuf_config/src/proto/config/snapshots_creator.proto +++ b/core/lib/protobuf_config/src/proto/config/snapshots_creator.proto @@ -7,4 +7,6 @@ message SnapshotsCreator { optional uint64 storage_logs_chunk_size = 1; // optional optional uint32 concurrent_queries_count = 2; // optional optional config.object_store.ObjectStore object_store = 3; + optional uint32 version = 4; // optional; defaults to 0 + optional uint32 l1_batch_number = 5; // optional } diff --git a/core/lib/protobuf_config/src/proto/config/vm_runner.proto b/core/lib/protobuf_config/src/proto/config/vm_runner.proto index c0c82d4d415f..93521a5fd893 100644 --- a/core/lib/protobuf_config/src/proto/config/vm_runner.proto +++ b/core/lib/protobuf_config/src/proto/config/vm_runner.proto @@ -7,3 +7,9 @@ message ProtectiveReadsWriter { optional uint64 window_size = 2; // required optional uint64 first_processed_batch = 3; // required } + +message BasicWitnessInputProducer { + optional string db_path = 1; // required; fs path + optional uint64 window_size = 2; // required + optional uint64 first_processed_batch = 3; // required +} diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index aa23ad9192f9..c64c993be7c8 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -29,6 +29,8 @@ syntax = "proto3"; package zksync.core.consensus; +import "zksync/std.proto"; + // (public key, ip address) of a gossip network node. message NodeAddr { optional string key = 1; // required; NodePublicKey @@ -41,12 +43,24 @@ message WeightedValidator { optional uint64 weight = 2; // required } +// Weighted member of an attester committee. +message WeightedAttester { + optional string key = 1; // required; AttesterPublic + optional uint64 weight = 2; // required +} + // Consensus genesis specification. message GenesisSpec { optional uint64 chain_id = 1; // required; L2ChainId, should be the same as `l2_chain_id` in the `zksync.config.genesis.Genesis`. optional uint32 protocol_version = 2; // required; validator::ProtocolVersion repeated WeightedValidator validators = 3; // must be non-empty; validator committee. 
optional string leader = 4; // required; ValidatorPublicKey + repeated WeightedAttester attesters = 5; // can be empty; attester committee. +} + +// Per peer connection RPC rate limits. +message RpcConfig { + optional std.RateLimit get_block_rate = 1; // optional; defaults to 10 blocks/s. } message Config { @@ -64,6 +78,9 @@ message Config { // Maximal allowed size of the payload. optional uint64 max_payload_size = 4; // required; bytes + // Maximal allowed size of the sync batches. + optional uint64 max_batch_size = 10; // required; bytes + // Inbound connections that should be unconditionally accepted on the gossip network. repeated string gossip_static_inbound = 5; // required; NodePublicKey @@ -79,5 +96,9 @@ message Config { // Used to (re)initialize genesis if needed. // External nodes fetch the genesis from the main node. optional GenesisSpec genesis_spec = 8; + + // RPC rate limits configuration. + // If missing, defaults are used. + optional RpcConfig rpc_config = 9; // optional } diff --git a/core/lib/protobuf_config/src/prover.rs b/core/lib/protobuf_config/src/prover.rs index 9a41e433433c..e1c31ee1fccd 100644 --- a/core/lib/protobuf_config/src/prover.rs +++ b/core/lib/protobuf_config/src/prover.rs @@ -193,6 +193,11 @@ impl ProtoRepr for proto::WitnessGenerator { .map(|x| x.try_into()) .transpose() .context("scheduler_generation_timeout_in_secs")?, + prometheus_listener_port: self + .prometheus_listener_port + .map(|x| x.try_into()) + .transpose() + .context("prometheus_listener_port")?, }) } @@ -213,6 +218,7 @@ impl ProtoRepr for proto::WitnessGenerator { scheduler_generation_timeout_in_secs: this .scheduler_generation_timeout_in_secs .map(|x| x.into()), + prometheus_listener_port: this.prometheus_listener_port.map(|x| x.into()), } } } @@ -286,6 +292,24 @@ impl proto::SetupLoadMode { } } +impl proto::CloudType { + fn new(x: &configs::fri_prover::CloudType) -> Self { + use configs::fri_prover::CloudType as From; + match x { + From::GCP => Self::Gcp, + From::Local => Self::Local, + } + } + + fn parse(&self) -> configs::fri_prover::CloudType { + use configs::fri_prover::CloudType as To; + match self { + Self::Gcp => To::GCP, + Self::Local => To::Local, + } + } +} + impl ProtoRepr for proto::Prover { type Type = configs::FriProverConfig; fn read(&self) -> anyhow::Result { @@ -332,6 +356,13 @@ impl ProtoRepr for proto::Prover { .context("shall_save_to_public_bucket")?, public_object_store, prover_object_store, + cloud_type: self + .cloud_type + .map(proto::CloudType::try_from) + .transpose() + .context("cloud_type")? 
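+                // `unwrap_or_default()` below makes this setting optional: when the field is
+                // unset, the Rust-side `CloudType::default()` is used.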
+ .map(|x| x.parse()) + .unwrap_or_default(), }) } @@ -350,6 +381,7 @@ impl ProtoRepr for proto::Prover { shall_save_to_public_bucket: Some(this.shall_save_to_public_bucket), prover_object_store: this.prover_object_store.as_ref().map(ProtoRepr::build), public_object_store: this.public_object_store.as_ref().map(ProtoRepr::build), + cloud_type: Some(proto::CloudType::new(&this.cloud_type).into()), } } } diff --git a/core/lib/protobuf_config/src/pruning.rs b/core/lib/protobuf_config/src/pruning.rs new file mode 100644 index 000000000000..ed0ebb10b92f --- /dev/null +++ b/core/lib/protobuf_config/src/pruning.rs @@ -0,0 +1,28 @@ +use std::num::NonZeroU64; + +use zksync_config::configs::PruningConfig; +use zksync_protobuf::ProtoRepr; + +use crate::proto::pruning as proto; + +impl ProtoRepr for proto::Pruning { + type Type = PruningConfig; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + enabled: self.enabled.unwrap_or_default(), + chunk_size: self.chunk_size, + removal_delay_sec: self.removal_delay_sec.and_then(NonZeroU64::new), + data_retention_sec: self.data_retention_sec, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + enabled: Some(this.enabled), + chunk_size: this.chunk_size, + removal_delay_sec: this.removal_delay_sec.map(|a| a.get()), + data_retention_sec: this.data_retention_sec, + } + } +} diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs index 91a05b31f196..43f537a5fbfa 100644 --- a/core/lib/protobuf_config/src/secrets.rs +++ b/core/lib/protobuf_config/src/secrets.rs @@ -4,7 +4,7 @@ use anyhow::Context; use secrecy::ExposeSecret; use zksync_basic_types::url::SensitiveUrl; use zksync_config::configs::{ - consensus::{ConsensusSecrets, NodeSecretKey, ValidatorSecretKey}, + consensus::{AttesterSecretKey, ConsensusSecrets, NodeSecretKey, ValidatorSecretKey}, secrets::Secrets, DatabaseSecrets, L1Secrets, }; @@ -98,6 +98,10 @@ impl ProtoRepr for proto::ConsensusSecrets { .validator_key .as_ref() .map(|x| ValidatorSecretKey(x.clone().into())), + attester_key: self + .attester_key + .as_ref() + .map(|x| AttesterSecretKey(x.clone().into())), node_key: self .node_key .as_ref() @@ -111,6 +115,10 @@ impl ProtoRepr for proto::ConsensusSecrets { .validator_key .as_ref() .map(|x| x.0.expose_secret().clone()), + attester_key: this + .attester_key + .as_ref() + .map(|x| x.0.expose_secret().clone()), node_key: this.node_key.as_ref().map(|x| x.0.expose_secret().clone()), } } diff --git a/core/lib/protobuf_config/src/snapshot_recovery.rs b/core/lib/protobuf_config/src/snapshot_recovery.rs new file mode 100644 index 000000000000..0c195abffe7a --- /dev/null +++ b/core/lib/protobuf_config/src/snapshot_recovery.rs @@ -0,0 +1,102 @@ +use std::num::NonZeroUsize; + +use anyhow::Context; +use zksync_basic_types::L1BatchNumber; +use zksync_config::configs::{ + snapshot_recovery::{PostgresRecoveryConfig, TreeRecoveryConfig}, + SnapshotRecoveryConfig, +}; +use zksync_protobuf::ProtoRepr; + +use crate::{proto::snapshot_recovery as proto, read_optional_repr}; + +impl ProtoRepr for proto::Postgres { + type Type = PostgresRecoveryConfig; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + max_concurrency: self + .max_concurrency + .and_then(|a| NonZeroUsize::new(a as usize)), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + max_concurrency: this.max_concurrency.map(|a| a.get() as u64), + } + } +} + +impl ProtoRepr for proto::SnapshotRecovery { + type Type = SnapshotRecoveryConfig; + + fn read(&self) -> anyhow::Result { + let tree = 
self + .tree + .as_ref() + .map(|tree| { + let chunk_size = tree.chunk_size; + let parallel_persistence_buffer = self + .experimental + .as_ref() + .and_then(|a| { + a.tree_recovery_parallel_persistence_buffer + .map(|a| NonZeroUsize::new(a as usize)) + }) + .flatten(); + TreeRecoveryConfig { + chunk_size, + parallel_persistence_buffer, + } + }) + .unwrap_or_default(); + + Ok(Self::Type { + enabled: self.enabled.unwrap_or_default(), + tree, + postgres: read_optional_repr(&self.postgres) + .context("postgres")? + .unwrap_or_default(), + l1_batch: self.l1_batch.map(L1BatchNumber), + object_store: read_optional_repr(&self.object_store).context("object store")?, + drop_storage_key_preimages: self + .experimental + .as_ref() + .and_then(|experimental| experimental.drop_storage_key_preimages) + .unwrap_or_default(), + }) + } + + fn build(this: &Self::Type) -> Self { + let (tree, experimental) = if this.tree == TreeRecoveryConfig::default() { + (None, None) + } else { + ( + Some(proto::Tree { + chunk_size: this.tree.chunk_size, + }), + Some(crate::proto::experimental::SnapshotRecovery { + tree_recovery_parallel_persistence_buffer: this + .tree + .parallel_persistence_buffer + .map(|a| a.get() as u64), + drop_storage_key_preimages: Some(this.drop_storage_key_preimages), + }), + ) + }; + let postgres = if this.postgres == PostgresRecoveryConfig::default() { + None + } else { + Some(this.postgres.clone()) + }; + Self { + enabled: Some(this.enabled), + postgres: postgres.as_ref().map(ProtoRepr::build), + tree, + experimental, + l1_batch: this.l1_batch.map(|a| a.0), + object_store: this.object_store.as_ref().map(ProtoRepr::build), + } + } +} diff --git a/core/lib/protobuf_config/src/snapshots_creator.rs b/core/lib/protobuf_config/src/snapshots_creator.rs index b13d11915b10..d21fb2c321fd 100644 --- a/core/lib/protobuf_config/src/snapshots_creator.rs +++ b/core/lib/protobuf_config/src/snapshots_creator.rs @@ -1,4 +1,5 @@ use anyhow::Context as _; +use zksync_basic_types::L1BatchNumber; use zksync_config::configs; use zksync_protobuf::{repr::ProtoRepr, required}; @@ -13,6 +14,12 @@ impl ProtoRepr for proto::SnapshotsCreator { None }; Ok(Self::Type { + version: self + .version + .unwrap_or_default() + .try_into() + .context("version")?, + l1_batch_number: self.l1_batch_number.map(L1BatchNumber), storage_logs_chunk_size: *required(&self.storage_logs_chunk_size) .context("storage_logs_chunk_size")?, concurrent_queries_count: *required(&self.concurrent_queries_count) @@ -23,6 +30,8 @@ impl ProtoRepr for proto::SnapshotsCreator { fn build(this: &Self::Type) -> Self { Self { + version: Some(this.version.into()), + l1_batch_number: this.l1_batch_number.map(|num| num.0), storage_logs_chunk_size: Some(this.storage_logs_chunk_size), concurrent_queries_count: Some(this.concurrent_queries_count), object_store: this.object_store.as_ref().map(ProtoRepr::build), diff --git a/core/lib/protobuf_config/src/testonly.rs b/core/lib/protobuf_config/src/testonly.rs deleted file mode 100644 index 8b137891791f..000000000000 --- a/core/lib/protobuf_config/src/testonly.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/core/lib/protobuf_config/src/tests.rs b/core/lib/protobuf_config/src/tests.rs index fad37700ae5f..695f404f64d1 100644 --- a/core/lib/protobuf_config/src/tests.rs +++ b/core/lib/protobuf_config/src/tests.rs @@ -1,12 +1,8 @@ use std::{path::PathBuf, str::FromStr}; -use anyhow::Context; -use zksync_protobuf::{ - testonly::{test_encode_all_formats, ReprConv}, - ProtoRepr, -}; +use 
zksync_protobuf::testonly::{test_encode_all_formats, ReprConv}; -use crate::proto; +use crate::{decode_yaml_repr, proto}; /// Tests config <-> proto (boilerplate) conversions. #[test] @@ -20,6 +16,7 @@ fn test_encoding() { test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); @@ -42,16 +39,21 @@ fn test_encoding() { test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); -} - -pub fn decode_yaml_repr( - path: &PathBuf, - deny_unknown_fields: bool, -) -> anyhow::Result { - let yaml = std::fs::read_to_string(path).with_context(|| path.display().to_string())?; - let d = serde_yaml::Deserializer::from_str(&yaml); - let this: T = zksync_protobuf::serde::deserialize_proto_with_options(d, deny_unknown_fields)?; - this.read() + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>( + rng, + ); + test_encode_all_formats::>(rng); } #[test] @@ -65,4 +67,6 @@ fn verify_file_parsing() { decode_yaml_repr::(&base_path.join("contracts.yaml"), true) .unwrap(); decode_yaml_repr::(&base_path.join("secrets.yaml"), true).unwrap(); + decode_yaml_repr::(&base_path.join("external_node.yaml"), true) + .unwrap(); } diff --git a/core/lib/protobuf_config/src/utils.rs b/core/lib/protobuf_config/src/utils.rs index e528e156248b..0fd3ac20effa 100644 --- a/core/lib/protobuf_config/src/utils.rs +++ b/core/lib/protobuf_config/src/utils.rs @@ -11,9 +11,7 @@ impl ProtoRepr for proto::Prometheus { listener_port: required(&self.listener_port) .and_then(|p| Ok((*p).try_into()?)) .context("listener_port")?, - pushgateway_url: required(&self.pushgateway_url) - .context("pushgateway_url")? - .clone(), + pushgateway_url: self.pushgateway_url.clone(), push_interval_ms: self.push_interval_ms, }) } @@ -21,7 +19,7 @@ impl ProtoRepr for proto::Prometheus { fn build(this: &Self::Type) -> Self { Self { listener_port: Some(this.listener_port.into()), - pushgateway_url: Some(this.pushgateway_url.clone()), + pushgateway_url: this.pushgateway_url.clone(), push_interval_ms: this.push_interval_ms, } } diff --git a/core/lib/protobuf_config/src/vm_runner.rs b/core/lib/protobuf_config/src/vm_runner.rs index 78bfee750521..cc0d53ad519e 100644 --- a/core/lib/protobuf_config/src/vm_runner.rs +++ b/core/lib/protobuf_config/src/vm_runner.rs @@ -26,3 +26,25 @@ impl ProtoRepr for proto::ProtectiveReadsWriter { } } } + +impl ProtoRepr for proto::BasicWitnessInputProducer { + type Type = configs::BasicWitnessInputProducerConfig; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + db_path: required(&self.db_path).context("db_path")?.clone(), + window_size: *required(&self.window_size).context("window_size")? as u32, + first_processed_batch: L1BatchNumber( + *required(&self.first_processed_batch).context("first_batch")? 
as u32, + ), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + db_path: Some(this.db_path.clone()), + window_size: Some(this.window_size as u64), + first_processed_batch: Some(this.first_processed_batch.0 as u64), + } + } +} diff --git a/core/lib/protobuf_config/src/wallets.rs b/core/lib/protobuf_config/src/wallets.rs index 1c3b7413de60..31fa63fd2702 100644 --- a/core/lib/protobuf_config/src/wallets.rs +++ b/core/lib/protobuf_config/src/wallets.rs @@ -66,12 +66,20 @@ impl ProtoRepr for proto::Wallets { .as_ref() .map(|blob| proto::PrivateKeyWallet { address: Some(format!("{:?}", blob.address())), - private_key: Some(format!("{:?}", blob.private_key())), + private_key: Some(hex::encode( + blob.private_key().expose_secret().secret_bytes(), + )), }); ( Some(proto::PrivateKeyWallet { address: Some(format!("{:?}", eth_sender.operator.address())), - private_key: Some(format!("{:?}", eth_sender.operator.private_key())), + private_key: Some(hex::encode( + eth_sender + .operator + .private_key() + .expose_secret() + .secret_bytes(), + )), }), blob, ) diff --git a/core/lib/prover_interface/Cargo.toml b/core/lib/prover_interface/Cargo.toml index 869338a8830d..89e402b27759 100644 --- a/core/lib/prover_interface/Cargo.toml +++ b/core/lib/prover_interface/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_prover_interface" -version = "0.1.0" +description = "Interfaces for interaction with ZKsync prover subsystem" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -10,8 +11,10 @@ keywords.workspace = true categories.workspace = true [dependencies] +zksync_multivm.workspace = true zksync_object_store.workspace = true zksync_types.workspace = true +zksync_state.workspace = true # We can use the newest api to send proofs to L1. 
circuit_sequencer_api_1_5_0.workspace = true
diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs
index fb96c62d38c7..00ac85a40739 100644
--- a/core/lib/prover_interface/src/api.rs
+++ b/core/lib/prover_interface/src/api.rs
@@ -3,13 +3,12 @@
 use serde::{Deserialize, Serialize};
 use zksync_types::{
-    basic_fri_types::Eip4844Blobs,
     protocol_version::{L1VerifierConfig, ProtocolSemanticVersion},
     L1BatchNumber,
 };
 
 use crate::{
-    inputs::PrepareBasicCircuitsJob,
+    inputs::{TeeVerifierInput, WitnessInputData},
     outputs::{L1BatchProofForL1, L1BatchTeeProofForL1},
 };
 
@@ -18,28 +17,35 @@ use crate::{
 #[derive(Debug, Serialize, Deserialize)]
 pub struct ProofGenerationData {
     pub l1_batch_number: L1BatchNumber,
-    pub data: PrepareBasicCircuitsJob,
+    pub witness_input_data: WitnessInputData,
     pub protocol_version: ProtocolSemanticVersion,
     pub l1_verifier_config: L1VerifierConfig,
-    pub eip_4844_blobs: Eip4844Blobs,
 }
 
 #[derive(Debug, Serialize, Deserialize)]
-pub enum GenericProofGenerationDataResponse<T> {
-    Success(Option<Box<T>>),
+pub enum ProofGenerationDataResponse {
+    Success(Option<Box<ProofGenerationData>>),
     Error(String),
 }
 
-pub type ProofGenerationDataResponse = GenericProofGenerationDataResponse<ProofGenerationData>;
+#[derive(Debug, Serialize, Deserialize)]
+pub struct TeeProofGenerationDataResponse(pub Option<Box<TeeVerifierInput>>);
 
 #[derive(Debug, Serialize, Deserialize)]
-pub enum SimpleResponse {
+pub enum SubmitProofResponse {
     Success,
     Error(String),
 }
 
-pub type SubmitProofResponse = SimpleResponse;
-pub type RegisterTeeAttestationResponse = SimpleResponse;
+#[derive(Debug, Serialize, Deserialize)]
+pub enum SubmitTeeProofResponse {
+    Success,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub enum RegisterTeeAttestationResponse {
+    Success,
+}
 
 // Structs to hold data necessary for making HTTP requests
diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs
index e4c0a0d3846b..8f2403d3369a 100644
--- a/core/lib/prover_interface/src/inputs.rs
+++ b/core/lib/prover_interface/src/inputs.rs
@@ -1,9 +1,14 @@
-use std::{convert::TryInto, fmt::Debug};
+use std::{collections::HashMap, convert::TryInto, fmt::Debug};
 
 use serde::{Deserialize, Serialize};
 use serde_with::{serde_as, Bytes};
+use zksync_multivm::interface::{L1BatchEnv, SystemEnv};
 use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject};
-use zksync_types::{L1BatchNumber, H256, U256};
+pub use zksync_state::WitnessStorage;
+use zksync_types::{
+    basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData,
+    witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256,
+};
 
 const HASH_LEN: usize = H256::len_bytes();
 
@@ -59,13 +64,13 @@ impl StorageLogMetadata {
 /// Merkle paths; if this is the case, the starting hashes are skipped and are the same
 /// as in the first path.
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-pub struct PrepareBasicCircuitsJob {
+pub struct WitnessInputMerklePaths {
     // Merkle paths and some auxiliary information for each read / write operation in a block.
     merkle_paths: Vec<StorageLogMetadata>,
     next_enumeration_index: u64,
 }
 
-impl StoredObject for PrepareBasicCircuitsJob {
+impl StoredObject for WitnessInputMerklePaths {
     const BUCKET: Bucket = Bucket::WitnessInput;
     type Key<'a> = L1BatchNumber;
 
@@ -76,7 +81,7 @@ impl StoredObject for PrepareBasicCircuitsJob {
     serialize_using_bincode!();
 }
 
-impl PrepareBasicCircuitsJob {
+impl WitnessInputMerklePaths {
     /// Creates a new job with the specified leaf index and no included paths.
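+    /// Merkle paths are appended afterwards via `push_merkle_path()`. A usage sketch, mirroring
+    /// the serialization tests below (`logs` is assumed to be a `Vec<StorageLogMetadata>`):
+    ///
+    /// ```ignore
+    /// let mut job = WitnessInputMerklePaths::new(4);
+    /// job.reserve(logs.len());
+    /// for log in &logs {
+    ///     job.push_merkle_path(log.clone());
+    /// }
+    /// ```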
 pub fn new(next_enumeration_index: u64) -> Self {
         Self {
@@ -132,16 +137,111 @@ impl PrepareBasicCircuitsJob {
     }
 }
 
-/// Enriched `PrepareBasicCircuitsJob`. All the other fields are taken from the `l1_batches` table.
-#[derive(Debug, Clone)]
-pub struct BasicCircuitWitnessGeneratorInput {
-    pub block_number: L1BatchNumber,
-    pub previous_block_hash: H256,
-    pub previous_block_timestamp: u64,
-    pub block_timestamp: u64,
-    pub used_bytecodes_hashes: Vec<U256>,
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct VMRunWitnessInputData {
+    pub l1_batch_number: L1BatchNumber,
+    pub used_bytecodes: HashMap<U256, Vec<u8>>,
     pub initial_heap_content: Vec<(usize, U256)>,
-    pub merkle_paths_input: PrepareBasicCircuitsJob,
+    pub protocol_version: ProtocolVersionId,
+    pub bootloader_code: Vec<[u8; 32]>,
+    pub default_account_code_hash: U256,
+    pub storage_refunds: Vec<u32>,
+    pub pubdata_costs: Vec<i32>,
+    pub witness_block_state: WitnessStorageState,
+}
+
+impl StoredObject for VMRunWitnessInputData {
+    const BUCKET: Bucket = Bucket::WitnessInput;
+
+    type Key<'a> = L1BatchNumber;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!("vm_run_data_{key}.bin")
+    }
+
+    serialize_using_bincode!();
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct WitnessInputData {
+    pub vm_run_data: VMRunWitnessInputData,
+    pub merkle_paths: WitnessInputMerklePaths,
+    pub previous_batch_metadata: L1BatchMetadataHashes,
+    pub eip_4844_blobs: Eip4844Blobs,
+}
+
+impl StoredObject for WitnessInputData {
+    const BUCKET: Bucket = Bucket::WitnessInput;
+
+    type Key<'a> = L1BatchNumber;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!("witness_inputs_{key}.bin")
+    }
+
+    serialize_using_bincode!();
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct L1BatchMetadataHashes {
+    pub root_hash: H256,
+    pub meta_hash: H256,
+    pub aux_hash: H256,
+}
+
+/// Version 1 of the data used as input for the TEE verifier.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct V1TeeVerifierInput {
+    pub witness_input_merkle_paths: WitnessInputMerklePaths,
+    pub l2_blocks_execution_data: Vec<L2BlockExecutionData>,
+    pub l1_batch_env: L1BatchEnv,
+    pub system_env: SystemEnv,
+    pub used_contracts: Vec<(H256, Vec<u8>)>,
+}
+
+impl V1TeeVerifierInput {
+    pub fn new(
+        witness_input_merkle_paths: WitnessInputMerklePaths,
+        l2_blocks_execution_data: Vec<L2BlockExecutionData>,
+        l1_batch_env: L1BatchEnv,
+        system_env: SystemEnv,
+        used_contracts: Vec<(H256, Vec<u8>)>,
+    ) -> Self {
+        V1TeeVerifierInput {
+            witness_input_merkle_paths,
+            l2_blocks_execution_data,
+            l1_batch_env,
+            system_env,
+            used_contracts,
+        }
+    }
+}
+
+/// Data used as input for the TEE verifier.
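+/// The wrapper is versioned so that the serialized format can evolve; construction goes through
+/// `V1TeeVerifierInput` (a sketch with placeholder argument names):
+/// `TeeVerifierInput::new(V1TeeVerifierInput::new(paths, blocks, l1_batch_env, system_env, used_contracts))`.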
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[non_exhaustive] +#[allow(clippy::large_enum_variant)] +pub enum TeeVerifierInput { + /// `V0` suppresses warning about irrefutable `let...else` pattern + V0, + V1(V1TeeVerifierInput), +} + +impl TeeVerifierInput { + pub fn new(input: V1TeeVerifierInput) -> Self { + TeeVerifierInput::V1(input) + } +} + +impl StoredObject for TeeVerifierInput { + const BUCKET: Bucket = Bucket::TeeVerifierInput; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("tee_verifier_input_for_l1_batch_{key}.bin") + } + + serialize_using_bincode!(); } #[cfg(test)] @@ -167,7 +267,7 @@ mod tests { }); let logs: Vec<_> = logs.collect(); - let mut job = PrepareBasicCircuitsJob::new(4); + let mut job = WitnessInputMerklePaths::new(4); job.reserve(logs.len()); for log in &logs { job.push_merkle_path(log.clone()); diff --git a/core/lib/prover_interface/src/outputs.rs b/core/lib/prover_interface/src/outputs.rs index a4035a21ec2b..9672bfb2142b 100644 --- a/core/lib/prover_interface/src/outputs.rs +++ b/core/lib/prover_interface/src/outputs.rs @@ -3,7 +3,7 @@ use core::fmt; use circuit_sequencer_api_1_5_0::proof::FinalProof; use serde::{Deserialize, Serialize}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; -use zksync_types::{protocol_version::ProtocolSemanticVersion, L1BatchNumber}; +use zksync_types::{protocol_version::ProtocolSemanticVersion, tee_types::TeeType, L1BatchNumber}; /// A "final" ZK proof that can be sent to the L1 contract. #[derive(Clone, Serialize, Deserialize)] @@ -23,6 +23,8 @@ pub struct L1BatchTeeProofForL1 { pub pubkey: Vec, // data that was signed pub proof: Vec, + // type of TEE used for attestation + pub tee_type: TeeType, } impl fmt::Debug for L1BatchProofForL1 { diff --git a/core/lib/prover_interface/tests/job_serialization.rs b/core/lib/prover_interface/tests/job_serialization.rs index 60a80f91ed8d..a2d55a140655 100644 --- a/core/lib/prover_interface/tests/job_serialization.rs +++ b/core/lib/prover_interface/tests/job_serialization.rs @@ -5,10 +5,12 @@ use tokio::fs; use zksync_object_store::{Bucket, MockObjectStore}; use zksync_prover_interface::{ api::{SubmitProofRequest, SubmitTeeProofRequest}, - inputs::{PrepareBasicCircuitsJob, StorageLogMetadata}, + inputs::{StorageLogMetadata, WitnessInputMerklePaths}, outputs::{L1BatchProofForL1, L1BatchTeeProofForL1}, }; -use zksync_types::{protocol_version::ProtocolSemanticVersion, L1BatchNumber, ProtocolVersionId}; +use zksync_types::{ + protocol_version::ProtocolSemanticVersion, tee_types::TeeType, L1BatchNumber, ProtocolVersionId, +}; /// Tests compatibility of the `PrepareBasicCircuitsJob` serialization to the previously used /// one. 
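/// (The type has since been renamed to `WitnessInputMerklePaths`; these tests pin down that the
/// rename does not change the bincode layout.)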
@@ -29,7 +31,7 @@ async fn prepare_basic_circuits_job_serialization() { .await .unwrap(); - let job: PrepareBasicCircuitsJob = store.get(L1BatchNumber(1)).await.unwrap(); + let job: WitnessInputMerklePaths = store.get(L1BatchNumber(1)).await.unwrap(); let key = store.put(L1BatchNumber(2), &job).await.unwrap(); let serialized_job = store.get_raw(Bucket::WitnessInput, &key).await.unwrap(); @@ -60,7 +62,7 @@ async fn prepare_basic_circuits_job_compatibility() { let serialized = bincode::serialize(&job_tuple).unwrap(); assert_eq!(serialized, snapshot); - let job: PrepareBasicCircuitsJob = bincode::deserialize(&snapshot).unwrap(); + let job: WitnessInputMerklePaths = bincode::deserialize(&snapshot).unwrap(); assert_eq!(job.next_enumeration_index(), job_tuple.1); let job_merkle_paths: Vec<_> = job.into_merkle_paths().collect(); assert_eq!(job_merkle_paths, job_tuple.0); @@ -167,13 +169,15 @@ fn test_tee_proof_request_serialization() { let tee_proof_str = r#"{ "signature": [ 0, 1, 2, 3, 4 ], "pubkey": [ 5, 6, 7, 8, 9 ], - "proof": [ 10, 11, 12, 13, 14 ] + "proof": [ 10, 11, 12, 13, 14 ], + "tee_type": "Sgx" }"#; let tee_proof_result = serde_json::from_str::(tee_proof_str).unwrap(); let tee_proof_expected = SubmitTeeProofRequest(Box::new(L1BatchTeeProofForL1 { signature: vec![0, 1, 2, 3, 4], pubkey: vec![5, 6, 7, 8, 9], proof: vec![10, 11, 12, 13, 14], + tee_type: TeeType::Sgx, })); assert_eq!(tee_proof_result, tee_proof_expected); } diff --git a/core/lib/queued_job_processor/Cargo.toml b/core/lib/queued_job_processor/Cargo.toml index 68817cb6e4ce..f71251541299 100644 --- a/core/lib/queued_job_processor/Cargo.toml +++ b/core/lib/queued_job_processor/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_queued_job_processor" -version = "0.1.0" +description = "Abstract queued job processor" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/lib/queued_job_processor/src/lib.rs b/core/lib/queued_job_processor/src/lib.rs index 569a2b7f59da..a5a4fa39fcae 100644 --- a/core/lib/queued_job_processor/src/lib.rs +++ b/core/lib/queued_job_processor/src/lib.rs @@ -5,7 +5,7 @@ use std::{ use anyhow::Context as _; pub use async_trait::async_trait; -use tokio::{sync::watch, task::JoinHandle, time::sleep}; +use tokio::{sync::watch, task::JoinHandle}; use vise::{Buckets, Counter, Histogram, LabeledFamily, Metrics}; use zksync_utils::panic_extractor::try_extract_panic_message; @@ -57,7 +57,7 @@ pub trait JobProcessor: Sync + Send { /// To process a batch, pass `Some(batch_size)`. async fn run( self, - stop_receiver: watch::Receiver, + mut stop_receiver: watch::Receiver, mut iterations_left: Option, ) -> anyhow::Result<()> where @@ -86,7 +86,7 @@ pub trait JobProcessor: Sync + Send { ); let task = self.process_job(&job_id, job, started_at).await; - self.wait_for_task(job_id, started_at, task) + self.wait_for_task(job_id, started_at, task, &mut stop_receiver) .await .context("wait_for_task")?; } else if iterations_left.is_some() { @@ -94,7 +94,10 @@ pub trait JobProcessor: Sync + Send { return Ok(()); } else { tracing::trace!("Backing off for {} ms", backoff); - sleep(Duration::from_millis(backoff)).await; + // Error here corresponds to a timeout w/o `stop_receiver` changed; we're OK with this. 
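+                // Unlike the `sleep()` call this replaces, the timeout wakes up as soon as a stop
+                // signal arrives instead of waiting out the full backoff period.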
+ tokio::time::timeout(Duration::from_millis(backoff), stop_receiver.changed()) + .await + .ok(); backoff = (backoff * Self::BACKOFF_MULTIPLIER).min(Self::MAX_BACKOFF_MS); } } @@ -108,6 +111,7 @@ pub trait JobProcessor: Sync + Send { job_id: Self::JobId, started_at: Instant, task: JoinHandle>, + stop_receiver: &mut watch::Receiver, ) -> anyhow::Result<()> { let attempts = self.get_job_attempts(&job_id).await?; let max_attempts = self.max_attempts(); @@ -130,7 +134,17 @@ pub trait JobProcessor: Sync + Send { if task.is_finished() { break task.await; } - sleep(Duration::from_millis(Self::POLLING_INTERVAL_MS)).await; + if tokio::time::timeout( + Duration::from_millis(Self::POLLING_INTERVAL_MS), + stop_receiver.changed(), + ) + .await + .is_ok() + { + // Stop signal received, return early. + // Exit will be processed/reported by the main loop. + return Ok(()); + } }; let error_message = match result { Ok(Ok(data)) => { diff --git a/core/lib/snapshots_applier/Cargo.toml b/core/lib/snapshots_applier/Cargo.toml index a293b7714b94..4ab0c86843ef 100644 --- a/core/lib/snapshots_applier/Cargo.toml +++ b/core/lib/snapshots_applier/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_snapshots_applier" -version = "0.1.0" +description = "Library for applying ZKsync state snapshots" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/lib/snapshots_applier/README.md b/core/lib/snapshots_applier/README.md new file mode 100644 index 000000000000..60f17344f5b1 --- /dev/null +++ b/core/lib/snapshots_applier/README.md @@ -0,0 +1,29 @@ +# `zksync_snapshots_applier` + +Library responsible for recovering Postgres from a protocol-level snapshot. + +## Recovery workflow + +_(See [node docs](../../../docs/guides/external-node/07_snapshots_recovery.md) for a high-level snapshot recovery +overview and [snapshot creator docs](../../bin/snapshots_creator/README.md) for the snapshot format details)_ + +1. Recovery is started by querying the main node and determining the snapshot parameters. By default, recovery is + performed from the latest snapshot, but it is possible to provide a manual override (L1 batch number of the + snapshot). +2. Factory dependencies (= contract bytecodes) are downloaded from the object store and are atomically saved to Postgres + together with the snapshot metadata (L1 batch number / L2 block numbers and timestamps, L1 batch state root hash, L2 + block hash etc.). +3. Storage log chunks are downloaded from the object store; each chunk is atomically saved to Postgres (`storage_logs` + and `initial_writes` tables). This step has a configurable degree of concurrency to control speed – I/O load + trade-off. +4. After all storage logs are restored, token information is fetched from the main node and saved in the corresponding + table. Tokens are double-checked against storage logs. + +Recovery is resilient to stops / failures; if the recovery process is interrupted, it will restart from the same +snapshot and will skip saving data that is already present in Postgres. + +Recovery logic for node components (such as metadata calculator and state keeper) is intentionally isolated from +Postgres recovery. A component requiring recovery must organize it on its own. This is motivated by the fact that at +least some components requiring recovery may initialize after an arbitrary delay after Postgres recovery (or not run at +all) and/or may be instantiated multiple times for a single node. 
As an example, both of these requirements hold for
+the metadata calculator / Merkle tree.
diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs
index ea1c11f40c2c..d2231f730b17 100644
--- a/core/lib/snapshots_applier/src/lib.rs
+++ b/core/lib/snapshots_applier/src/lib.rs
@@ -1,11 +1,13 @@
 //! Logic for applying application-level snapshots to Postgres storage.
 
-use std::{collections::HashMap, fmt, num::NonZeroUsize, sync::Arc, time::Duration};
+use std::{
+    cmp::Ordering, collections::HashMap, fmt, mem, num::NonZeroUsize, sync::Arc, time::Duration,
+};
 
 use anyhow::Context as _;
 use async_trait::async_trait;
 use serde::Serialize;
-use tokio::sync::Semaphore;
+use tokio::sync::{watch, Semaphore};
 use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError, SqlxError};
 use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck};
 use zksync_object_store::{ObjectStore, ObjectStoreError};
@@ -16,7 +18,7 @@ use zksync_types::{
         SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey, SnapshotVersion,
     },
     tokens::TokenInfo,
-    L1BatchNumber, L2BlockNumber, H256,
+    L1BatchNumber, L2BlockNumber, StorageKey, H256,
 };
 use zksync_utils::bytecode::hash_bytecode;
 use zksync_web3_decl::{
@@ -74,6 +76,8 @@ enum SnapshotsApplierError {
     Fatal(#[from] anyhow::Error),
     #[error(transparent)]
     Retryable(anyhow::Error),
+    #[error("Snapshot recovery has been canceled")]
+    Canceled,
 }
 
 impl SnapshotsApplierError {
@@ -191,6 +195,17 @@ impl SnapshotsApplierMainNodeClient for Box<DynClient<L2>> {
     }
 }
 
+/// Reported status of the snapshot recovery progress.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum RecoveryCompletionStatus {
+    /// There is no information about snapshot recovery in the database.
+    NoRecoveryDetected,
+    /// Snapshot recovery is not finished yet.
+    InProgress,
+    /// Snapshot recovery is completed.
+    Completed,
+}
+
 /// Snapshot applier configuration options.
 #[derive(Debug, Clone)]
 pub struct SnapshotsApplierConfig {
@@ -232,11 +247,14 @@ impl SnapshotsApplierConfig {
 pub struct SnapshotApplierTaskStats {
     /// Did the task do any work?
     pub done_work: bool,
+    /// Was the task canceled?
+    pub canceled: bool,
 }
 
 #[derive(Debug)]
 pub struct SnapshotsApplierTask {
     snapshot_l1_batch: Option<L1BatchNumber>,
+    drop_storage_key_preimages: bool,
     config: SnapshotsApplierConfig,
     health_updater: HealthUpdater,
     connection_pool: ConnectionPool<Core>,
@@ -253,6 +271,7 @@ impl SnapshotsApplierTask {
     ) -> Self {
         Self {
             snapshot_l1_batch: None,
+            drop_storage_key_preimages: false,
             config,
             health_updater: ReactiveHealthCheck::new("snapshot_recovery").1,
             connection_pool,
@@ -261,11 +280,56 @@ impl SnapshotsApplierTask {
         }
     }
 
+    /// Checks whether snapshot recovery is already completed.
+    ///
+    /// Returns [`RecoveryCompletionStatus::NoRecoveryDetected`] if there is no snapshot recovery
+    /// information in the DB, [`RecoveryCompletionStatus::InProgress`] if recovery has started
+    /// but is not finished yet, and [`RecoveryCompletionStatus::Completed`] if it is done.
+    pub async fn is_recovery_completed(
+        conn: &mut Connection<'_, Core>,
+        client: &dyn SnapshotsApplierMainNodeClient,
+    ) -> anyhow::Result<RecoveryCompletionStatus> {
+        let Some(applied_snapshot_status) = conn
+            .snapshot_recovery_dal()
+            .get_applied_snapshot_status()
+            .await?
+        else {
+            return Ok(RecoveryCompletionStatus::NoRecoveryDetected);
+        };
+        // If there are unprocessed storage logs chunks, the recovery is not complete.
+        if applied_snapshot_status.storage_logs_chunks_left_to_process() != 0 {
+            return Ok(RecoveryCompletionStatus::InProgress);
+        }
+        // Currently, migrating tokens is the last step of the recovery.
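+        // Token parity with the main node therefore serves as the marker of a completed recovery.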
+ // The number of tokens is not a part of the snapshot header, so we have to re-query the main node. + let added_tokens = conn + .tokens_web3_dal() + .get_all_tokens(Some(applied_snapshot_status.l2_block_number)) + .await? + .len(); + let tokens_on_main_node = client + .fetch_tokens(applied_snapshot_status.l2_block_number) + .await? + .len(); + + match added_tokens.cmp(&tokens_on_main_node) { + Ordering::Less => Ok(RecoveryCompletionStatus::InProgress), + Ordering::Equal => Ok(RecoveryCompletionStatus::Completed), + Ordering::Greater => anyhow::bail!("DB contains more tokens than the main node"), + } + } + /// Specifies the L1 batch to recover from. This setting is ignored if recovery is complete or resumed. pub fn set_snapshot_l1_batch(&mut self, number: L1BatchNumber) { self.snapshot_l1_batch = Some(number); } + /// Enables dropping storage key preimages when recovering storage logs from a snapshot with version 0. + /// This is a temporary flag that will eventually be removed together with version 0 snapshot support. + pub fn drop_storage_key_preimages(&mut self) { + self.drop_storage_key_preimages = true; + } + /// Returns the health check for snapshot recovery. pub fn health_check(&self) -> ReactiveHealthCheck { self.health_updater.subscribe() @@ -279,21 +343,23 @@ impl SnapshotsApplierTask { /// or under any of the following conditions: /// /// - There are no snapshots on the main node - pub async fn run(self) -> anyhow::Result { + pub async fn run( + self, + mut stop_receiver: watch::Receiver, + ) -> anyhow::Result { tracing::info!("Starting snapshot recovery with config: {:?}", self.config); let mut backoff = self.config.initial_retry_backoff; let mut last_error = None; for retry_id in 0..self.config.retry_count { - let result = SnapshotsApplier::load_snapshot( - &self.connection_pool, - self.main_node_client.as_ref(), - self.blob_store.as_ref(), - &self.health_updater, - self.snapshot_l1_batch, - self.config.max_concurrency.get(), - ) - .await; + if *stop_receiver.borrow() { + return Ok(SnapshotApplierTaskStats { + done_work: false, // Not really relevant, since the node will be shut down. + canceled: true, + }); + } + + let result = SnapshotsApplier::load_snapshot(&self, &mut stop_receiver).await; match result { Ok((strategy, final_status)) => { @@ -305,6 +371,7 @@ impl SnapshotsApplierTask { self.health_updater.freeze(); return Ok(SnapshotApplierTaskStats { done_work: !matches!(strategy, SnapshotRecoveryStrategy::Completed), + canceled: false, }); } Err(SnapshotsApplierError::Fatal(err)) => { @@ -318,9 +385,19 @@ impl SnapshotsApplierTask { "Recovering from error; attempt {retry_id} / {}, retrying in {backoff:?}", self.config.retry_count ); - tokio::time::sleep(backoff).await; + tokio::time::timeout(backoff, stop_receiver.changed()) + .await + .ok(); + // Stop receiver will be checked on the next iteration. backoff = backoff.mul_f32(self.config.retry_backoff_multiplier); } + Err(SnapshotsApplierError::Canceled) => { + tracing::info!("Snapshot recovery has been canceled"); + return Ok(SnapshotApplierTaskStats { + done_work: false, + canceled: true, + }); + } } } @@ -334,9 +411,9 @@ impl SnapshotsApplierTask { #[derive(Debug, Clone, Copy)] enum SnapshotRecoveryStrategy { /// Snapshot recovery should proceed from scratch with the specified params. - New, + New(SnapshotVersion), /// Snapshot recovery should continue with the specified params. - Resumed, + Resumed(SnapshotVersion), /// Snapshot recovery has already been completed. 
Completed, } @@ -360,9 +437,20 @@ impl SnapshotRecoveryStrategy { return Ok((Self::Completed, applied_snapshot_status)); } + let l1_batch_number = applied_snapshot_status.l1_batch_number; + let snapshot_header = main_node_client + .fetch_snapshot(l1_batch_number) + .await? + .with_context(|| { + format!("snapshot for L1 batch #{l1_batch_number} is no longer present on main node") + })?; + // Old snapshots can theoretically be removed by the node, but in this case the snapshot data may be removed as well, + // so returning an error looks appropriate here. + let snapshot_version = Self::check_snapshot_version(snapshot_header.version)?; + let latency = latency.observe(); tracing::info!("Re-initialized snapshots applier after reset/failure in {latency:?}"); - Ok((Self::Resumed, applied_snapshot_status)) + Ok((Self::Resumed(snapshot_version), applied_snapshot_status)) } else { let is_genesis_needed = storage.blocks_dal().is_genesis_needed().await?; if !is_genesis_needed { @@ -372,7 +460,7 @@ impl SnapshotRecoveryStrategy { return Err(SnapshotsApplierError::Fatal(err)); } - let recovery_status = + let (recovery_status, snapshot_version) = Self::create_fresh_recovery_status(main_node_client, snapshot_l1_batch).await?; let storage_logs_count = storage @@ -390,14 +478,14 @@ impl SnapshotRecoveryStrategy { let latency = latency.observe(); tracing::info!("Initialized fresh snapshots applier in {latency:?}"); - Ok((Self::New, recovery_status)) + Ok((Self::New(snapshot_version), recovery_status)) } } async fn create_fresh_recovery_status( main_node_client: &dyn SnapshotsApplierMainNodeClient, snapshot_l1_batch: Option, - ) -> Result { + ) -> Result<(SnapshotRecoveryStatus, SnapshotVersion), SnapshotsApplierError> { let l1_batch_number = match snapshot_l1_batch { Some(num) => num, None => main_node_client @@ -417,7 +505,7 @@ impl SnapshotRecoveryStrategy { version = snapshot.version, chunk_count = snapshot.storage_logs_chunks.len() ); - Self::check_snapshot_version(snapshot.version)?; + let snapshot_version = Self::check_snapshot_version(snapshot.version)?; let l1_batch = main_node_client .fetch_l1_batch_details(l1_batch_number) @@ -445,7 +533,7 @@ impl SnapshotRecoveryStrategy { return Err(err.into()); } - Ok(SnapshotRecoveryStatus { + let status = SnapshotRecoveryStatus { l1_batch_number, l1_batch_timestamp: l1_batch.base.timestamp, l1_batch_root_hash, @@ -454,22 +542,105 @@ impl SnapshotRecoveryStrategy { l2_block_hash, protocol_version, storage_logs_chunks_processed: vec![false; snapshot.storage_logs_chunks.len()], - }) + }; + Ok((status, snapshot_version)) } - fn check_snapshot_version(raw_version: u16) -> anyhow::Result<()> { + fn check_snapshot_version(raw_version: u16) -> anyhow::Result { let version = SnapshotVersion::try_from(raw_version).with_context(|| { format!( "Unrecognized snapshot version: {raw_version}; make sure you're running the latest version of the node" ) })?; anyhow::ensure!( - matches!(version, SnapshotVersion::Version0), - "Cannot recover from a snapshot with version {version:?}; the only supported version is {:?}", - SnapshotVersion::Version0 + matches!(version, SnapshotVersion::Version0 | SnapshotVersion::Version1), + "Cannot recover from a snapshot with version {version:?}; the only supported versions are {:?}", + [SnapshotVersion::Version0, SnapshotVersion::Version1] ); + Ok(version) + } +} + +/// Versioned storage logs chunk. 
+#[derive(Debug)] +enum StorageLogs { + V0(Vec>), + V1(Vec), +} + +impl StorageLogs { + async fn load( + blob_store: &dyn ObjectStore, + key: SnapshotStorageLogsStorageKey, + version: SnapshotVersion, + ) -> Result { + match version { + SnapshotVersion::Version0 => { + let logs: SnapshotStorageLogsChunk = blob_store.get(key).await?; + Ok(Self::V0(logs.storage_logs)) + } + SnapshotVersion::Version1 => { + let logs: SnapshotStorageLogsChunk = blob_store.get(key).await?; + Ok(Self::V1(logs.storage_logs)) + } + } + } + + fn len(&self) -> usize { + match self { + Self::V0(logs) => logs.len(), + Self::V1(logs) => logs.len(), + } + } + + /// Performs basic sanity check for a storage logs chunk. + fn validate(&self, snapshot_status: &SnapshotRecoveryStatus) -> anyhow::Result<()> { + match self { + Self::V0(logs) => Self::validate_inner(logs, snapshot_status), + Self::V1(logs) => Self::validate_inner(logs, snapshot_status), + } + } + + fn validate_inner( + storage_logs: &[SnapshotStorageLog], + snapshot_status: &SnapshotRecoveryStatus, + ) -> anyhow::Result<()> { + for log in storage_logs { + anyhow::ensure!( + log.enumeration_index > 0, + "invalid storage log with zero enumeration_index: {log:?}" + ); + anyhow::ensure!( + log.l1_batch_number_of_initial_write <= snapshot_status.l1_batch_number, + "invalid storage log with `l1_batch_number_of_initial_write` from the future: {log:?}" + ); + } Ok(()) } + + fn drop_key_preimages(&mut self) { + match self { + Self::V0(logs) => { + *self = Self::V1( + mem::take(logs) + .into_iter() + .map(SnapshotStorageLog::drop_key_preimage) + .collect(), + ); + } + Self::V1(_) => { /* do nothing */ } + } + } + + fn without_preimages(self) -> Vec { + match self { + Self::V0(logs) => logs + .into_iter() + .map(SnapshotStorageLog::drop_key_preimage) + .collect(), + Self::V1(logs) => logs, + } + } } /// Applying application-level storage snapshots to the Postgres storage. @@ -480,7 +651,9 @@ struct SnapshotsApplier<'a> { blob_store: &'a dyn ObjectStore, applied_snapshot_status: SnapshotRecoveryStatus, health_updater: &'a HealthUpdater, + snapshot_version: SnapshotVersion, max_concurrency: usize, + drop_storage_key_preimages: bool, factory_deps_recovered: bool, tokens_recovered: bool, } @@ -488,13 +661,13 @@ struct SnapshotsApplier<'a> { impl<'a> SnapshotsApplier<'a> { /// Returns final snapshot recovery status. async fn load_snapshot( - connection_pool: &'a ConnectionPool, - main_node_client: &'a dyn SnapshotsApplierMainNodeClient, - blob_store: &'a dyn ObjectStore, - health_updater: &'a HealthUpdater, - snapshot_l1_batch: Option, - max_concurrency: usize, + task: &'a SnapshotsApplierTask, + stop_receiver: &mut watch::Receiver, ) -> Result<(SnapshotRecoveryStrategy, SnapshotRecoveryStatus), SnapshotsApplierError> { + let health_updater = &task.health_updater; + let connection_pool = &task.connection_pool; + let main_node_client = task.main_node_client.as_ref(); + // While the recovery is in progress, the node is healthy (no error has occurred), // but is affected (its usual APIs don't work). 
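A sketch of how one chunk flows through the versioned `StorageLogs` wrapper defined above, mirroring the chunk-recovery path that follows; the helper name is hypothetical and error handling is collapsed into `anyhow` (the diff hunk resumes after the sketch).

// Loads a chunk in whichever format the snapshot version dictates, sanity-checks
// it, and optionally normalizes it to the preimage-less V1 representation.
async fn load_chunk(
    blob_store: &dyn ObjectStore,
    key: SnapshotStorageLogsStorageKey,
    version: SnapshotVersion,
    status: &SnapshotRecoveryStatus,
    drop_preimages: bool,
) -> anyhow::Result<StorageLogs> {
    let mut logs = StorageLogs::load(blob_store, key, version).await?;
    logs.validate(status)?;
    if drop_preimages {
        logs.drop_key_preimages();
    }
    Ok(logs)
}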
health_updater.update(HealthStatus::Affected.into()); @@ -507,23 +680,25 @@ impl<'a> SnapshotsApplier<'a> { let (strategy, applied_snapshot_status) = SnapshotRecoveryStrategy::new( &mut storage_transaction, main_node_client, - snapshot_l1_batch, + task.snapshot_l1_batch, ) .await?; tracing::info!("Chosen snapshot recovery strategy: {strategy:?} with status: {applied_snapshot_status:?}"); - let created_from_scratch = match strategy { + let (created_from_scratch, snapshot_version) = match strategy { SnapshotRecoveryStrategy::Completed => return Ok((strategy, applied_snapshot_status)), - SnapshotRecoveryStrategy::New => true, - SnapshotRecoveryStrategy::Resumed => false, + SnapshotRecoveryStrategy::New(version) => (true, version), + SnapshotRecoveryStrategy::Resumed(version) => (false, version), }; let mut this = Self { connection_pool, main_node_client, - blob_store, + blob_store: task.blob_store.as_ref(), applied_snapshot_status, health_updater, - max_concurrency, + snapshot_version, + max_concurrency: task.config.max_concurrency.get(), + drop_storage_key_preimages: task.drop_storage_key_preimages, factory_deps_recovered: !created_from_scratch, tokens_recovered: false, }; @@ -568,7 +743,7 @@ impl<'a> SnapshotsApplier<'a> { this.factory_deps_recovered = true; this.update_health(); - this.recover_storage_logs().await?; + this.recover_storage_logs(stop_receiver).await?; for is_chunk_processed in &mut this.applied_snapshot_status.storage_logs_chunks_processed { *is_chunk_processed = true; } @@ -658,16 +833,30 @@ impl<'a> SnapshotsApplier<'a> { async fn insert_storage_logs_chunk( &self, - storage_logs: &[SnapshotStorageLog], + storage_logs: &StorageLogs, storage: &mut Connection<'_, Core>, ) -> Result<(), SnapshotsApplierError> { - storage - .storage_logs_dal() - .insert_storage_logs_from_snapshot( - self.applied_snapshot_status.l2_block_number, - storage_logs, - ) - .await?; + match storage_logs { + StorageLogs::V0(logs) => { + #[allow(deprecated)] + storage + .storage_logs_dal() + .insert_storage_logs_with_preimages_from_snapshot( + self.applied_snapshot_status.l2_block_number, + logs, + ) + .await?; + } + StorageLogs::V1(logs) => { + storage + .storage_logs_dal() + .insert_storage_logs_from_snapshot( + self.applied_snapshot_status.l2_block_number, + logs, + ) + .await?; + } + } Ok(()) } @@ -688,14 +877,19 @@ impl<'a> SnapshotsApplier<'a> { chunk_id, l1_batch_number: self.applied_snapshot_status.l1_batch_number, }; - let storage_snapshot_chunk: SnapshotStorageLogsChunk = - self.blob_store.get(storage_key).await.map_err(|err| { - let context = - format!("cannot fetch storage logs {storage_key:?} from object store"); - SnapshotsApplierError::object_store(err, context) - })?; - let storage_logs = &storage_snapshot_chunk.storage_logs; - self.validate_storage_logs_chunk(storage_logs)?; + let mut storage_logs = + StorageLogs::load(self.blob_store, storage_key, self.snapshot_version) + .await + .map_err(|err| { + let context = + format!("cannot fetch storage logs {storage_key:?} from object store"); + SnapshotsApplierError::object_store(err, context) + })?; + + storage_logs.validate(&self.applied_snapshot_status)?; + if self.drop_storage_key_preimages { + storage_logs.drop_key_preimages(); + } let latency = latency.observe(); tracing::info!( "Loaded {} storage logs from GCS for chunk {chunk_id} in {latency:?}", @@ -712,9 +906,11 @@ impl<'a> SnapshotsApplier<'a> { let mut storage_transaction = storage.start_transaction().await?; tracing::info!("Loading {} storage logs into Postgres", 
storage_logs.len()); - self.insert_storage_logs_chunk(storage_logs, &mut storage_transaction) + + self.insert_storage_logs_chunk(&storage_logs, &mut storage_transaction) .await?; - self.insert_initial_writes_chunk(storage_logs, &mut storage_transaction) + let storage_logs = storage_logs.without_preimages(); + self.insert_initial_writes_chunk(&storage_logs, &mut storage_transaction) .await?; storage_transaction @@ -730,25 +926,10 @@ impl<'a> SnapshotsApplier<'a> { Ok(()) } - /// Performs basic sanity check for a storage logs chunk. - fn validate_storage_logs_chunk( + async fn recover_storage_logs( &self, - storage_logs: &[SnapshotStorageLog], - ) -> anyhow::Result<()> { - for log in storage_logs { - anyhow::ensure!( - log.enumeration_index > 0, - "invalid storage log with zero enumeration_index: {log:?}" - ); - anyhow::ensure!( - log.l1_batch_number_of_initial_write <= self.applied_snapshot_status.l1_batch_number, - "invalid storage log with `l1_batch_number_of_initial_write` from the future: {log:?}" - ); - } - Ok(()) - } - - async fn recover_storage_logs(&self) -> Result<(), SnapshotsApplierError> { + stop_receiver: &mut watch::Receiver, + ) -> Result<(), SnapshotsApplierError> { let effective_concurrency = (self.connection_pool.max_size() as usize).min(self.max_concurrency); tracing::info!( @@ -765,7 +946,16 @@ impl<'a> SnapshotsApplier<'a> { .map(|(chunk_id, _)| { self.recover_storage_logs_single_chunk(&semaphore, chunk_id as u64) }); - futures::future::try_join_all(tasks).await?; + let job_completion = futures::future::try_join_all(tasks); + + tokio::select! { + res = job_completion => { + res?; + }, + _ = stop_receiver.changed() => { + return Err(SnapshotsApplierError::Canceled); + } + } let mut storage = self .connection_pool diff --git a/core/lib/snapshots_applier/src/tests/mod.rs b/core/lib/snapshots_applier/src/tests/mod.rs index b15f8bc657bf..379808b365ca 100644 --- a/core/lib/snapshots_applier/src/tests/mod.rs +++ b/core/lib/snapshots_applier/src/tests/mod.rs @@ -25,6 +25,16 @@ use crate::tests::utils::HangingObjectStore; mod utils; +async fn is_recovery_completed( + pool: &ConnectionPool, + client: &MockMainNodeClient, +) -> RecoveryCompletionStatus { + let mut connection = pool.connection().await.unwrap(); + SnapshotsApplierTask::is_recovery_completed(&mut connection, client) + .await + .unwrap() +} + #[test_casing(3, [(None, false), (Some(2), false), (None, true)])] #[tokio::test] async fn snapshots_creator_can_successfully_recover_db( @@ -36,13 +46,12 @@ async fn snapshots_creator_can_successfully_recover_db( } else { ConnectionPool::::test_pool().await }; + let expected_status = mock_recovery_status(); let storage_logs = random_storage_logs(expected_status.l1_batch_number, 200); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; - let storage_logs_by_hashed_key: HashMap<_, _> = storage_logs - .into_iter() - .map(|log| (log.key.hashed_key(), log)) - .collect(); + let storage_logs_by_hashed_key: HashMap<_, _> = + storage_logs.into_iter().map(|log| (log.key, log)).collect(); let object_store_with_errors; let object_store = if with_object_store_errors { @@ -62,6 +71,12 @@ async fn snapshots_creator_can_successfully_recover_db( object_store }; + assert_eq!( + is_recovery_completed(&pool, &client).await, + RecoveryCompletionStatus::NoRecoveryDetected, + "No snapshot information in the DB" + ); + let task = SnapshotsApplierTask::new( SnapshotsApplierConfig::for_tests(), pool.clone(), @@ -69,13 +84,20 @@ async fn 
snapshots_creator_can_successfully_recover_db( object_store.clone(), ); let task_health = task.health_check(); - let stats = task.run().await.unwrap(); + let (_stop_sender, stop_receiver) = watch::channel(false); + let stats = task.run(stop_receiver).await.unwrap(); assert!(stats.done_work); assert_matches!( task_health.check_health().await.status(), HealthStatus::Ready ); + assert_eq!( + is_recovery_completed(&pool, &client).await, + RecoveryCompletionStatus::Completed, + "Recovery has been completed" + ); + let mut storage = pool.connection().await.unwrap(); let mut recovery_dal = storage.snapshot_recovery_dal(); @@ -103,8 +125,9 @@ async fn snapshots_creator_can_successfully_recover_db( assert_eq!(all_storage_logs.len(), storage_logs_by_hashed_key.len()); for db_log in all_storage_logs { let expected_log = &storage_logs_by_hashed_key[&db_log.hashed_key]; - assert_eq!(db_log.address, *expected_log.key.address()); - assert_eq!(db_log.key, *expected_log.key.key()); + assert_eq!(db_log.hashed_key, expected_log.key); + assert!(db_log.key.is_none()); + assert!(db_log.address.is_none()); assert_eq!(db_log.value, expected_log.value); assert_eq!(db_log.l2_block_number, expected_status.l2_block_number); } @@ -116,7 +139,9 @@ async fn snapshots_creator_can_successfully_recover_db( Box::new(client.clone()), object_store.clone(), ); - task.run().await.unwrap(); + + let (_stop_sender, stop_receiver) = watch::channel(false); + task.run(stop_receiver).await.unwrap(); // Here, stats would unfortunately have `done_work: true` because work detection isn't smart enough. // Emulate a node processing data after recovery. @@ -139,15 +164,64 @@ async fn snapshots_creator_can_successfully_recover_db( Box::new(client), object_store, ); - let stats = task.run().await.unwrap(); + let (_stop_sender, stop_receiver) = watch::channel(false); + let stats = task.run(stop_receiver).await.unwrap(); assert!(!stats.done_work); } +#[test_casing(2, [false, true])] +#[tokio::test] +async fn applier_recovers_v0_snapshot(drop_storage_key_preimages: bool) { + let pool = ConnectionPool::::test_pool().await; + let expected_status = mock_recovery_status(); + let storage_logs = random_storage_logs::(expected_status.l1_batch_number, 200); + let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; + + let mut task = SnapshotsApplierTask::new( + SnapshotsApplierConfig::for_tests(), + pool.clone(), + Box::new(client), + object_store, + ); + if drop_storage_key_preimages { + task.drop_storage_key_preimages(); + } + let (_stop_sender, stop_receiver) = watch::channel(false); + let stats = task.run(stop_receiver).await.unwrap(); + assert!(stats.done_work); + + let mut storage = pool.connection().await.unwrap(); + let all_storage_logs = storage + .storage_logs_dal() + .dump_all_storage_logs_for_tests() + .await; + assert_eq!(all_storage_logs.len(), storage_logs.len()); + + let storage_logs_by_hashed_key: HashMap<_, _> = storage_logs + .into_iter() + .map(|log| (log.key.hashed_key(), log)) + .collect(); + for db_log in all_storage_logs { + let expected_log = &storage_logs_by_hashed_key[&db_log.hashed_key]; + assert_eq!(db_log.hashed_key, expected_log.key.hashed_key()); + assert_eq!(db_log.value, expected_log.value); + assert_eq!(db_log.l2_block_number, expected_status.l2_block_number); + + if drop_storage_key_preimages { + assert!(db_log.key.is_none()); + assert!(db_log.address.is_none()); + } else { + assert_eq!(db_log.key, Some(*expected_log.key.key())); + assert_eq!(db_log.address, 
Some(*expected_log.key.address())); + } + } +} + #[tokio::test] async fn applier_recovers_explicitly_specified_snapshot() { let pool = ConnectionPool::::test_pool().await; let expected_status = mock_recovery_status(); - let storage_logs = random_storage_logs(expected_status.l1_batch_number, 200); + let storage_logs = random_storage_logs::(expected_status.l1_batch_number, 200); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; let mut task = SnapshotsApplierTask::new( @@ -157,7 +231,8 @@ async fn applier_recovers_explicitly_specified_snapshot() { object_store, ); task.set_snapshot_l1_batch(expected_status.l1_batch_number); - let stats = task.run().await.unwrap(); + let (_stop_sender, stop_receiver) = watch::channel(false); + let stats = task.run(stop_receiver).await.unwrap(); assert!(stats.done_work); let mut storage = pool.connection().await.unwrap(); @@ -172,7 +247,7 @@ async fn applier_recovers_explicitly_specified_snapshot() { async fn applier_error_for_missing_explicitly_specified_snapshot() { let pool = ConnectionPool::::test_pool().await; let expected_status = mock_recovery_status(); - let storage_logs = random_storage_logs(expected_status.l1_batch_number, 200); + let storage_logs = random_storage_logs::(expected_status.l1_batch_number, 200); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; let mut task = SnapshotsApplierTask::new( @@ -183,7 +258,8 @@ async fn applier_error_for_missing_explicitly_specified_snapshot() { ); task.set_snapshot_l1_batch(expected_status.l1_batch_number + 1); - let err = task.run().await.unwrap_err(); + let (_stop_sender, stop_receiver) = watch::channel(false); + let err = task.run(stop_receiver).await.unwrap_err(); assert!( format!("{err:#}").contains("not present on main node"), "{err:#}" @@ -195,7 +271,7 @@ async fn snapshot_applier_recovers_after_stopping() { let pool = ConnectionPool::::test_pool().await; let mut expected_status = mock_recovery_status(); expected_status.storage_logs_chunks_processed = vec![true; 10]; - let storage_logs = random_storage_logs(expected_status.l1_batch_number, 200); + let storage_logs = random_storage_logs::(expected_status.l1_batch_number, 200); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; let (stopping_object_store, mut stop_receiver) = HangingObjectStore::new(object_store.clone(), 1); @@ -208,13 +284,20 @@ async fn snapshot_applier_recovers_after_stopping() { Box::new(client.clone()), Arc::new(stopping_object_store), ); - let task_handle = tokio::spawn(task.run()); + let (_stop_sender, task_stop_receiver) = watch::channel(false); + let task_handle = tokio::spawn(task.run(task_stop_receiver)); // Wait until the first storage logs chunk is requested (the object store hangs up at this point) stop_receiver.wait_for(|&count| count > 1).await.unwrap(); assert!(!task_handle.is_finished()); task_handle.abort(); + assert_eq!( + is_recovery_completed(&pool, &client).await, + RecoveryCompletionStatus::InProgress, + "Recovery has been aborted" + ); + // Check that factory deps have been persisted, but no storage logs. 
let mut storage = pool.connection().await.unwrap(); let all_factory_deps = storage @@ -238,12 +321,19 @@ async fn snapshot_applier_recovers_after_stopping() { Box::new(client.clone()), Arc::new(stopping_object_store), ); - let task_handle = tokio::spawn(task.run()); + let (_stop_sender, task_stop_receiver) = watch::channel(false); + let task_handle = tokio::spawn(task.run(task_stop_receiver)); stop_receiver.wait_for(|&count| count > 3).await.unwrap(); assert!(!task_handle.is_finished()); task_handle.abort(); + assert_eq!( + is_recovery_completed(&pool, &client).await, + RecoveryCompletionStatus::InProgress, + "Not all logs have been recovered" + ); + let all_storage_logs = storage .storage_logs_dal() .dump_all_storage_logs_for_tests() @@ -255,11 +345,18 @@ async fn snapshot_applier_recovers_after_stopping() { let mut task = SnapshotsApplierTask::new( config, pool.clone(), - Box::new(client), + Box::new(client.clone()), Arc::new(stopping_object_store), ); task.set_snapshot_l1_batch(expected_status.l1_batch_number); // check that this works fine - task.run().await.unwrap(); + let (_stop_sender, stop_receiver) = watch::channel(false); + task.run(stop_receiver).await.unwrap(); + + assert_eq!( + is_recovery_completed(&pool, &client).await, + RecoveryCompletionStatus::Completed, + "Recovery has been completed" + ); let all_storage_logs = storage .storage_logs_dal() @@ -324,7 +421,8 @@ async fn health_status_immediately_after_task_start() { object_store, ); let task_health = task.health_check(); - let task_handle = tokio::spawn(task.run()); + let (_stop_sender, task_stop_receiver) = watch::channel(false); + let task_handle = tokio::spawn(task.run(task_stop_receiver)); client.0.wait().await; // Wait for the first L2 client call (at which point, the task is certainly initialized) assert_matches!( @@ -378,7 +476,8 @@ async fn applier_errors_after_genesis() { Box::new(client), object_store, ); - task.run().await.unwrap_err(); + let (_stop_sender, task_stop_receiver) = watch::channel(false); + task.run(task_stop_receiver).await.unwrap_err(); } #[tokio::test] @@ -393,7 +492,8 @@ async fn applier_errors_without_snapshots() { Box::new(client), object_store, ); - task.run().await.unwrap_err(); + let (_stop_sender, stop_receiver) = watch::channel(false); + task.run(stop_receiver).await.unwrap_err(); } #[tokio::test] @@ -402,10 +502,7 @@ async fn applier_errors_with_unrecognized_snapshot_version() { let object_store = MockObjectStore::arc(); let expected_status = mock_recovery_status(); let client = MockMainNodeClient { - fetch_newest_snapshot_response: Some(SnapshotHeader { - version: u16::MAX, - ..mock_snapshot_header(&expected_status) - }), + fetch_newest_snapshot_response: Some(mock_snapshot_header(u16::MAX, &expected_status)), ..MockMainNodeClient::default() }; @@ -415,14 +512,15 @@ async fn applier_errors_with_unrecognized_snapshot_version() { Box::new(client), object_store, ); - task.run().await.unwrap_err(); + let (_stop_sender, stop_receiver) = watch::channel(false); + task.run(stop_receiver).await.unwrap_err(); } #[tokio::test] async fn applier_returns_error_on_fatal_object_store_error() { let pool = ConnectionPool::::test_pool().await; let expected_status = mock_recovery_status(); - let storage_logs = random_storage_logs(expected_status.l1_batch_number, 100); + let storage_logs = random_storage_logs::(expected_status.l1_batch_number, 100); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; let object_store = ObjectStoreWithErrors::new(object_store, |_| { 
Err(ObjectStoreError::KeyNotFound("not found".into())) @@ -434,7 +532,8 @@ async fn applier_returns_error_on_fatal_object_store_error() { Box::new(client), Arc::new(object_store), ); - let err = task.run().await.unwrap_err(); + let (_stop_sender, stop_receiver) = watch::channel(false); + let err = task.run(stop_receiver).await.unwrap_err(); assert!(err.chain().any(|cause| { matches!( cause.downcast_ref::(), @@ -447,7 +546,7 @@ async fn applier_returns_error_on_fatal_object_store_error() { async fn applier_returns_error_after_too_many_object_store_retries() { let pool = ConnectionPool::::test_pool().await; let expected_status = mock_recovery_status(); - let storage_logs = random_storage_logs(expected_status.l1_batch_number, 100); + let storage_logs = random_storage_logs::(expected_status.l1_batch_number, 100); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; let object_store = ObjectStoreWithErrors::new(object_store, |_| { Err(ObjectStoreError::Other { @@ -462,7 +561,8 @@ async fn applier_returns_error_after_too_many_object_store_retries() { Box::new(client), Arc::new(object_store), ); - let err = task.run().await.unwrap_err(); + let (_stop_sender, stop_receiver) = watch::channel(false); + let err = task.run(stop_receiver).await.unwrap_err(); assert!(err.chain().any(|cause| { matches!( cause.downcast_ref::(), @@ -482,7 +582,7 @@ async fn recovering_tokens() { continue; } storage_logs.push(SnapshotStorageLog { - key: get_code_key(&token.l2_address), + key: get_code_key(&token.l2_address).hashed_key(), value: H256::random(), l1_batch_number_of_initial_write: expected_status.l1_batch_number, enumeration_index: storage_logs.len() as u64 + 1, @@ -492,13 +592,40 @@ async fn recovering_tokens() { client.tokens_response.clone_from(&tokens); + // Make sure that the task will fail when we will start migrating tokens. + client.set_token_response_error(EnrichedClientError::custom("Error", "not_important")); + let task = SnapshotsApplierTask::new( SnapshotsApplierConfig::for_tests(), pool.clone(), Box::new(client.clone()), object_store.clone(), ); - task.run().await.unwrap(); + let (_stop_sender, stop_receiver) = watch::channel(false); + let task_result = task.run(stop_receiver).await; + assert!(task_result.is_err()); + + assert_eq!( + is_recovery_completed(&pool, &client).await, + RecoveryCompletionStatus::InProgress, + "Tokens are not migrated" + ); + + // Now perform the recovery again, tokens should be migrated. + let task = SnapshotsApplierTask::new( + SnapshotsApplierConfig::for_tests(), + pool.clone(), + Box::new(client.clone()), + object_store.clone(), + ); + let (_stop_sender, stop_receiver) = watch::channel(false); + task.run(stop_receiver).await.unwrap(); + + assert_eq!( + is_recovery_completed(&pool, &client).await, + RecoveryCompletionStatus::Completed, + "Recovery is completed" + ); // Check that tokens are successfully restored. 
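The single-use error injected in the test above relies on a small fault-injection pattern: the mock keeps the error behind `Arc<RwLock<Option<_>>>` and `take()`s it on first access, so the retried request succeeds. A generic sketch of that pattern (the type below is illustrative, not part of the diff, which resumes after it):

use std::sync::{Arc, RwLock};

// One-shot fault: `trigger()` consumes the stored error, so only the first
// caller observes it and any subsequent retry goes through.
#[derive(Clone, Default)]
struct OneShotFault<E>(Arc<RwLock<Option<E>>>);

impl<E> OneShotFault<E> {
    fn arm(&self, error: E) {
        *self.0.write().unwrap() = Some(error);
    }

    fn trigger(&self) -> Option<E> {
        self.0.write().unwrap().take()
    }
}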
let mut storage = pool.connection().await.unwrap(); @@ -526,5 +653,41 @@ async fn recovering_tokens() { Box::new(client), object_store, ); - task.run().await.unwrap(); + let (_stop_sender, stop_receiver) = watch::channel(false); + task.run(stop_receiver).await.unwrap(); +} + +#[tokio::test] +async fn snapshot_applier_can_be_canceled() { + let pool = ConnectionPool::::test_pool().await; + let mut expected_status = mock_recovery_status(); + expected_status.storage_logs_chunks_processed = vec![true; 10]; + let storage_logs = random_storage_logs::(expected_status.l1_batch_number, 200); + let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; + let (stopping_object_store, mut stop_receiver) = + HangingObjectStore::new(object_store.clone(), 1); + + let mut config = SnapshotsApplierConfig::for_tests(); + config.max_concurrency = NonZeroUsize::new(1).unwrap(); + let task = SnapshotsApplierTask::new( + config.clone(), + pool.clone(), + Box::new(client.clone()), + Arc::new(stopping_object_store), + ); + let (task_stop_sender, task_stop_receiver) = watch::channel(false); + let task_handle = tokio::spawn(task.run(task_stop_receiver)); + + // Wait until the first storage logs chunk is requested (the object store hangs up at this point) + stop_receiver.wait_for(|&count| count > 1).await.unwrap(); + assert!(!task_handle.is_finished()); + + task_stop_sender.send(true).unwrap(); + let result = tokio::time::timeout(Duration::from_secs(5), task_handle) + .await + .expect("Task wasn't canceled") + .unwrap() + .expect("Task erred"); + assert!(result.canceled); + assert!(!result.done_work); } diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs index e683e0cae00f..c546fb60c09b 100644 --- a/core/lib/snapshots_applier/src/tests/utils.rs +++ b/core/lib/snapshots_applier/src/tests/utils.rs @@ -1,10 +1,14 @@ //! Test utils. -use std::{collections::HashMap, fmt, future, sync::Arc}; +use std::{ + collections::HashMap, + fmt, future, + sync::{Arc, RwLock}, +}; use async_trait::async_trait; use tokio::sync::watch; -use zksync_object_store::{Bucket, MockObjectStore, ObjectStore, ObjectStoreError}; +use zksync_object_store::{Bucket, MockObjectStore, ObjectStore, ObjectStoreError, StoredObject}; use zksync_types::{ api, block::L2BlockHeader, @@ -16,18 +20,53 @@ use zksync_types::{ tokens::{TokenInfo, TokenMetadata}, web3::Bytes, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, - StorageValue, H160, H256, + StorageValue, H256, }; -use zksync_web3_decl::error::EnrichedClientResult; +use zksync_web3_decl::error::{EnrichedClientError, EnrichedClientResult}; use crate::SnapshotsApplierMainNodeClient; +pub(super) trait SnapshotLogKey: Clone { + const VERSION: SnapshotVersion; + + fn random() -> Self; +} + +impl SnapshotLogKey for H256 { + const VERSION: SnapshotVersion = SnapshotVersion::Version1; + + fn random() -> Self { + Self::random() + } +} + +impl SnapshotLogKey for StorageKey { + const VERSION: SnapshotVersion = SnapshotVersion::Version0; + + fn random() -> Self { + Self::new(AccountTreeId::new(Address::random()), H256::random()) + } +} + #[derive(Debug, Clone, Default)] pub(super) struct MockMainNodeClient { pub fetch_l1_batch_responses: HashMap, pub fetch_l2_block_responses: HashMap, pub fetch_newest_snapshot_response: Option, pub tokens_response: Vec, + pub tokens_response_error: Arc>>, +} + +impl MockMainNodeClient { + /// Sets the error to be returned by the `fetch_tokens` method. 
+ /// Error will be returned just once. Next time the request will succeed. + pub(super) fn set_token_response_error(&self, error: EnrichedClientError) { + *self.tokens_response_error.write().unwrap() = Some(error); + } + + fn take_token_response_error(&self) -> Option { + self.tokens_response_error.write().unwrap().take() + } } #[async_trait] @@ -69,6 +108,10 @@ impl SnapshotsApplierMainNodeClient for MockMainNodeClient { &self, _at_l2_block: L2BlockNumber, ) -> EnrichedClientResult> { + if let Some(error) = self.take_token_response_error() { + return Err(error); + } + Ok(self.tokens_response.clone()) } } @@ -182,16 +225,13 @@ fn l1_batch_details(number: L1BatchNumber, root_hash: H256) -> api::L1BatchDetai } } -pub(super) fn random_storage_logs( +pub(super) fn random_storage_logs( l1_batch_number: L1BatchNumber, count: u64, -) -> Vec { +) -> Vec> { (0..count) .map(|i| SnapshotStorageLog { - key: StorageKey::new( - AccountTreeId::from_fixed_bytes(H160::random().to_fixed_bytes()), - H256::random(), - ), + key: K::random(), value: StorageValue::random(), l1_batch_number_of_initial_write: l1_batch_number, enumeration_index: i + 1, @@ -235,9 +275,12 @@ pub(super) fn mock_tokens() -> Vec { ] } -pub(super) fn mock_snapshot_header(status: &SnapshotRecoveryStatus) -> SnapshotHeader { +pub(super) fn mock_snapshot_header( + version: u16, + status: &SnapshotRecoveryStatus, +) -> SnapshotHeader { SnapshotHeader { - version: SnapshotVersion::Version0.into(), + version, l1_batch_number: status.l1_batch_number, l2_block_number: status.l2_block_number, storage_logs_chunks: (0..status.storage_logs_chunks_processed.len() as u64) @@ -250,10 +293,14 @@ pub(super) fn mock_snapshot_header(status: &SnapshotRecoveryStatus) -> SnapshotH } } -pub(super) async fn prepare_clients( +pub(super) async fn prepare_clients( status: &SnapshotRecoveryStatus, - logs: &[SnapshotStorageLog], -) -> (Arc, MockMainNodeClient) { + logs: &[SnapshotStorageLog], +) -> (Arc, MockMainNodeClient) +where + K: SnapshotLogKey, + for<'a> SnapshotStorageLogsChunk: StoredObject = SnapshotStorageLogsStorageKey>, +{ let object_store = MockObjectStore::arc(); let mut client = MockMainNodeClient::default(); let factory_dep_bytes: Vec = (0..32).collect(); @@ -286,7 +333,7 @@ pub(super) async fn prepare_clients( .unwrap(); } - client.fetch_newest_snapshot_response = Some(mock_snapshot_header(status)); + client.fetch_newest_snapshot_response = Some(mock_snapshot_header(K::VERSION.into(), status)); client.fetch_l1_batch_responses.insert( status.l1_batch_number, l1_batch_details(status.l1_batch_number, status.l1_batch_root_hash), diff --git a/core/lib/state/Cargo.toml b/core/lib/state/Cargo.toml index fd1742788eff..119bc800b800 100644 --- a/core/lib/state/Cargo.toml +++ b/core/lib/state/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_state" -version = "0.1.0" +description = "ZKsync state keeper state" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -25,6 +26,7 @@ tracing.workspace = true itertools.workspace = true chrono.workspace = true once_cell.workspace = true +backon.workspace = true [dev-dependencies] assert_matches.workspace = true diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs index 3d0da29cc713..ca312e184e6e 100644 --- a/core/lib/state/src/lib.rs +++ b/core/lib/state/src/lib.rs @@ -17,22 +17,11 @@ use zksync_types::{ H256, }; -mod cache; -mod catchup; -mod in_memory; -mod postgres; -mod rocksdb; -mod shadow_storage; -mod storage_factory; -mod storage_view; 
-#[cfg(test)] -mod test_utils; - pub use self::{ cache::sequential_cache::SequentialCache, catchup::{AsyncCatchupTask, RocksdbCell}, - in_memory::InMemoryStorage, // Note, that `test_infra` of the bootloader tests relies on this value to be exposed + in_memory::InMemoryStorage, in_memory::IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID, postgres::{PostgresStorage, PostgresStorageCaches, PostgresStorageCachesTask}, rocksdb::{ @@ -40,9 +29,22 @@ pub use self::{ }, shadow_storage::ShadowStorage, storage_factory::{BatchDiff, PgOrRocksdbStorage, ReadStorageFactory, RocksdbWithMemory}, - storage_view::{ImmutableStorageView, StorageView, StorageViewMetrics}, + storage_view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewMetrics}, + witness::WitnessStorage, }; +mod cache; +mod catchup; +mod in_memory; +mod postgres; +mod rocksdb; +mod shadow_storage; +mod storage_factory; +mod storage_view; +#[cfg(test)] +mod test_utils; +mod witness; + /// Functionality to read from the VM storage. pub trait ReadStorage: fmt::Debug { /// Read value of the key. diff --git a/core/lib/state/src/postgres/mod.rs b/core/lib/state/src/postgres/mod.rs index 17163af0d56f..9d7f6c3f71fa 100644 --- a/core/lib/state/src/postgres/mod.rs +++ b/core/lib/state/src/postgres/mod.rs @@ -1,9 +1,11 @@ use std::{ mem, sync::{Arc, RwLock}, + time::Duration, }; use anyhow::Context as _; +use backon::{BlockingRetryable, ConstantBuilder}; use tokio::{ runtime::Handle, sync::{ @@ -42,12 +44,12 @@ impl CacheValue for TimestampedFactoryDep { } /// Type alias for initial writes caches. -type InitialWritesCache = LruCache; +type InitialWritesCache = LruCache; -impl CacheValue for L1BatchNumber { +impl CacheValue for L1BatchNumber { #[allow(clippy::cast_possible_truncation)] // doesn't happen in practice fn cache_weight(&self) -> u32 { - const WEIGHT: usize = mem::size_of::() + mem::size_of::(); + const WEIGHT: usize = mem::size_of::() + mem::size_of::(); // ^ Since values are small, we want to account for key sizes as well WEIGHT as u32 @@ -122,7 +124,7 @@ impl ValuesCache { /// Gets the cached value for `key` provided that the cache currently holds values /// for `l2_block_number`. - fn get(&self, l2_block_number: L2BlockNumber, key: &StorageKey) -> Option { + fn get(&self, l2_block_number: L2BlockNumber, hashed_key: H256) -> Option { let lock = self.0.read().expect("values cache is poisoned"); if lock.valid_for < l2_block_number { // The request is from the future; we cannot say which values in the cache remain valid, @@ -130,7 +132,7 @@ impl ValuesCache { return None; } - let timestamped_value = lock.values.get(&key.hashed_key())?; + let timestamped_value = lock.values.get(&hashed_key)?; if timestamped_value.loaded_at <= l2_block_number { Some(timestamped_value.value) } else { @@ -139,11 +141,11 @@ impl ValuesCache { } /// Caches `value` for `key`, but only if the cache currently holds values for `l2_block_number`. 
- fn insert(&self, l2_block_number: L2BlockNumber, key: StorageKey, value: StorageValue) { + fn insert(&self, l2_block_number: L2BlockNumber, hashed_key: H256, value: StorageValue) { let lock = self.0.read().expect("values cache is poisoned"); if lock.valid_for == l2_block_number { lock.values.insert( - key.hashed_key(), + hashed_key, TimestampedStorageValue { value, loaded_at: l2_block_number, @@ -481,19 +483,36 @@ impl<'a> PostgresStorage<'a> { } impl ReadStorage for PostgresStorage<'_> { - fn read_value(&mut self, &key: &StorageKey) -> StorageValue { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + let hashed_key = key.hashed_key(); let latency = STORAGE_METRICS.storage[&Method::ReadValue].start(); let values_cache = self.values_cache(); - let cached_value = values_cache.and_then(|cache| cache.get(self.l2_block_number, &key)); + let cached_value = + values_cache.and_then(|cache| cache.get(self.l2_block_number, hashed_key)); let value = cached_value.unwrap_or_else(|| { + const RETRY_INTERVAL: Duration = Duration::from_millis(500); + const MAX_TRIES: usize = 20; + let mut dal = self.connection.storage_web3_dal(); - let value = self - .rt_handle - .block_on(dal.get_historical_value_unchecked(&key, self.l2_block_number)) - .expect("Failed executing `read_value`"); + let value = (|| { + self.rt_handle + .block_on(dal.get_historical_value_unchecked(hashed_key, self.l2_block_number)) + }) + .retry( + &ConstantBuilder::default() + .with_delay(RETRY_INTERVAL) + .with_max_times(MAX_TRIES), + ) + .when(|e| { + e.inner() + .as_database_error() + .is_some_and(|e| e.message() == "canceling statement due to statement timeout") + }) + .call() + .expect("Failed executing `read_value`"); if let Some(cache) = self.values_cache() { - cache.insert(self.l2_block_number, key, value); + cache.insert(self.l2_block_number, hashed_key, value); } value }); @@ -503,13 +522,15 @@ impl ReadStorage for PostgresStorage<'_> { } fn is_write_initial(&mut self, key: &StorageKey) -> bool { + let hashed_key = key.hashed_key(); let latency = STORAGE_METRICS.storage[&Method::IsWriteInitial].start(); let caches = self.caches.as_ref(); - let cached_value = caches.and_then(|caches| caches.initial_writes.get(key)); + let cached_value = caches.and_then(|caches| caches.initial_writes.get(&hashed_key)); if cached_value.is_none() { // Write is absent in positive cache, check whether it's present in the negative cache. - let cached_value = caches.and_then(|caches| caches.negative_initial_writes.get(key)); + let cached_value = + caches.and_then(|caches| caches.negative_initial_writes.get(&hashed_key)); if let Some(min_l1_batch_for_initial_write) = cached_value { // We know that this slot was certainly not touched before `min_l1_batch_for_initial_write`. // Try to use this knowledge to decide if the change is certainly initial. 
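The `read_value` change above introduces `backon` for retrying Postgres statement timeouts. The policy in isolation, as a sketch: the closure stands in for the DAL call, and the predicate mirrors the timeout-message check above (the diff resumes after the sketch).

use std::time::Duration;

use backon::{BlockingRetryable, ConstantBuilder};

// Retries a blocking operation every 500 ms, at most 20 times, but only for
// errors that the predicate classifies as transient.
fn with_constant_retries<T, E>(
    mut op: impl FnMut() -> Result<T, E>,
    is_transient: impl FnMut(&E) -> bool,
) -> Result<T, E> {
    (|| op())
        .retry(
            &ConstantBuilder::default()
                .with_delay(Duration::from_millis(500))
                .with_max_times(20),
        )
        .when(is_transient)
        .call()
}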
@@ -526,17 +547,17 @@ impl ReadStorage for PostgresStorage<'_> { let mut dal = self.connection.storage_web3_dal(); let value = self .rt_handle - .block_on(dal.get_l1_batch_number_for_initial_write(key)) + .block_on(dal.get_l1_batch_number_for_initial_write(hashed_key)) .expect("Failed executing `is_write_initial`"); if let Some(caches) = &self.caches { if let Some(l1_batch_number) = value { - caches.negative_initial_writes.remove(key); - caches.initial_writes.insert(*key, l1_batch_number); + caches.negative_initial_writes.remove(&hashed_key); + caches.initial_writes.insert(hashed_key, l1_batch_number); } else { caches .negative_initial_writes - .insert(*key, self.pending_l1_batch_number); + .insert(hashed_key, self.pending_l1_batch_number); // The pending L1 batch might have been sealed since its number was requested from Postgres // in `Self::new()`, so this is a somewhat conservative estimate. } @@ -589,13 +610,11 @@ impl ReadStorage for PostgresStorage<'_> { } fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + let hashed_key = key.hashed_key(); let mut dal = self.connection.storage_logs_dedup_dal(); - let value = self - .rt_handle - .block_on(dal.get_enumeration_index_in_l1_batch( - key.hashed_key(), - self.l1_batch_number_for_l2_block, - )); + let value = self.rt_handle.block_on( + dal.get_enumeration_index_in_l1_batch(hashed_key, self.l1_batch_number_for_l2_block), + ); value.expect("failed getting enumeration index for key") } } diff --git a/core/lib/state/src/postgres/tests.rs b/core/lib/state/src/postgres/tests.rs index 4ab8ebb12a7b..f88055fa0479 100644 --- a/core/lib/state/src/postgres/tests.rs +++ b/core/lib/state/src/postgres/tests.rs @@ -318,11 +318,15 @@ fn test_initial_writes_cache(pool: &ConnectionPool, rt_handle: Handle) { assert!(storage.is_write_initial(&logs[0].key)); assert!(storage.is_write_initial(&non_existing_key)); assert_eq!( - caches.negative_initial_writes.get(&logs[0].key), + caches + .negative_initial_writes + .get(&logs[0].key.hashed_key()), Some(L1BatchNumber(0)) ); assert_eq!( - caches.negative_initial_writes.get(&non_existing_key), + caches + .negative_initial_writes + .get(&non_existing_key.hashed_key()), Some(L1BatchNumber(0)) ); assert!(storage.is_write_initial(&logs[0].key)); @@ -353,12 +357,19 @@ fn test_initial_writes_cache(pool: &ConnectionPool, rt_handle: Handle) { // Check that the cache entries have been updated assert_eq!( - caches.initial_writes.get(&logs[0].key), + caches.initial_writes.get(&logs[0].key.hashed_key()), Some(L1BatchNumber(1)) ); - assert_eq!(caches.negative_initial_writes.get(&logs[0].key), None); assert_eq!( - caches.negative_initial_writes.get(&non_existing_key), + caches + .negative_initial_writes + .get(&logs[0].key.hashed_key()), + None + ); + assert_eq!( + caches + .negative_initial_writes + .get(&non_existing_key.hashed_key()), Some(L1BatchNumber(2)) ); assert!(storage.is_write_initial(&logs[0].key)); @@ -376,11 +387,13 @@ fn test_initial_writes_cache(pool: &ConnectionPool, rt_handle: Handle) { // Check that the cache entries are still as expected. 
assert_eq!( - caches.initial_writes.get(&logs[0].key), + caches.initial_writes.get(&logs[0].key.hashed_key()), Some(L1BatchNumber(1)) ); assert_eq!( - caches.negative_initial_writes.get(&non_existing_key), + caches + .negative_initial_writes + .get(&non_existing_key.hashed_key()), Some(L1BatchNumber(2)) ); @@ -415,7 +428,10 @@ struct ValueCacheAssertions<'a> { impl ValueCacheAssertions<'_> { fn assert_entries(&self, expected_entries: &[(StorageKey, Option)]) { for (key, expected_value) in expected_entries { - assert_eq!(self.cache.get(self.l2_block_number, key), *expected_value); + assert_eq!( + self.cache.get(self.l2_block_number, key.hashed_key()), + *expected_value + ); } } } diff --git a/core/lib/state/src/rocksdb/mod.rs b/core/lib/state/src/rocksdb/mod.rs index bda416cb433b..aab33c7dfe83 100644 --- a/core/lib/state/src/rocksdb/mod.rs +++ b/core/lib/state/src/rocksdb/mod.rs @@ -400,7 +400,7 @@ impl RocksdbStorage { async fn apply_storage_logs( &mut self, - storage_logs: HashMap, + storage_logs: HashMap, storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { let db = self.db.clone(); @@ -409,12 +409,13 @@ impl RocksdbStorage { .await .context("panicked processing storage logs")?; - let (logs_with_known_indices, logs_with_unknown_indices): (Vec<_>, Vec<_>) = processed_logs - .into_iter() - .partition_map(|(key, StateValue { value, enum_index })| match enum_index { - Some(index) => Either::Left((key.hashed_key(), (value, index))), - None => Either::Right((key.hashed_key(), value)), - }); + let (logs_with_known_indices, logs_with_unknown_indices): (Vec<_>, Vec<_>) = + processed_logs.into_iter().partition_map( + |(hashed_key, StateValue { value, enum_index })| match enum_index { + Some(index) => Either::Left((hashed_key, (value, index))), + None => Either::Right((hashed_key, value)), + }, + ); let keys_with_unknown_indices: Vec<_> = logs_with_unknown_indices .iter() .map(|&(key, _)| key) @@ -440,8 +441,8 @@ impl RocksdbStorage { Ok(()) } - fn read_value_inner(&self, key: &StorageKey) -> Option { - Self::read_state_value(&self.db, key.hashed_key()).map(|state_value| state_value.value) + fn read_value_inner(&self, hashed_key: H256) -> Option { + Self::read_state_value(&self.db, hashed_key).map(|state_value| state_value.value) } fn read_state_value( @@ -457,15 +458,20 @@ impl RocksdbStorage { /// Returns storage logs to apply. 
fn process_transaction_logs( db: &RocksDB, - updates: HashMap, - ) -> Vec<(StorageKey, StateValue)> { - let it = updates.into_iter().filter_map(move |(key, new_value)| { - if let Some(state_value) = Self::read_state_value(db, key.hashed_key()) { - Some((key, StateValue::new(new_value, state_value.enum_index))) - } else { - (!new_value.is_zero()).then_some((key, StateValue::new(new_value, None))) - } - }); + updates: HashMap, + ) -> Vec<(H256, StateValue)> { + let it = updates + .into_iter() + .filter_map(move |(hashed_key, new_value)| { + if let Some(state_value) = Self::read_state_value(db, hashed_key) { + Some(( + hashed_key, + StateValue::new(new_value, state_value.enum_index), + )) + } else { + (!new_value.is_zero()).then_some((hashed_key, StateValue::new(new_value, None))) + } + }); it.collect() } @@ -617,11 +623,12 @@ impl RocksdbStorage { impl ReadStorage for RocksdbStorage { fn read_value(&mut self, key: &StorageKey) -> StorageValue { - self.read_value_inner(key).unwrap_or_else(H256::zero) + self.read_value_inner(key.hashed_key()) + .unwrap_or_else(H256::zero) } fn is_write_initial(&mut self, key: &StorageKey) -> bool { - self.read_value_inner(key).is_none() + self.read_value_inner(key.hashed_key()).is_none() } fn load_factory_dep(&mut self, hash: H256) -> Option> { diff --git a/core/lib/state/src/rocksdb/tests.rs b/core/lib/state/src/rocksdb/tests.rs index a006fcba4750..e73590015079 100644 --- a/core/lib/state/src/rocksdb/tests.rs +++ b/core/lib/state/src/rocksdb/tests.rs @@ -40,6 +40,12 @@ impl Default for RocksdbStorageEventListener { } } +fn hash_storage_log_keys(logs: &HashMap) -> HashMap { + logs.iter() + .map(|(key, value)| (key.hashed_key(), *value)) + .collect() +} + #[tokio::test] async fn rocksdb_storage_basics() { let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); @@ -50,10 +56,11 @@ async fn rocksdb_storage_basics() { .into_iter() .map(|log| (log.key, log.value)) .collect(); - let changed_keys = RocksdbStorage::process_transaction_logs(&storage.db, storage_logs.clone()); + let changed_keys = + RocksdbStorage::process_transaction_logs(&storage.db, hash_storage_log_keys(&storage_logs)); storage.pending_patch.state = changed_keys .into_iter() - .map(|(key, state_value)| (key.hashed_key(), (state_value.value, 1))) // enum index doesn't matter in the test + .map(|(key, state_value)| (key, (state_value.value, 1))) // enum index doesn't matter in the test .collect(); storage.save(Some(L1BatchNumber(0))).await.unwrap(); { @@ -64,13 +71,14 @@ async fn rocksdb_storage_basics() { } // Overwrite some of the logs. 
- for log in storage_logs.values_mut().step_by(2) { - *log = StorageValue::zero(); + for log_value in storage_logs.values_mut().step_by(2) { + *log_value = StorageValue::zero(); } - let changed_keys = RocksdbStorage::process_transaction_logs(&storage.db, storage_logs.clone()); + let changed_keys = + RocksdbStorage::process_transaction_logs(&storage.db, hash_storage_log_keys(&storage_logs)); storage.pending_patch.state = changed_keys .into_iter() - .map(|(key, state_value)| (key.hashed_key(), (state_value.value, 1))) // enum index doesn't matter in the test + .map(|(key, state_value)| (key, (state_value.value, 1))) // enum index doesn't matter in the test .collect(); storage.save(Some(L1BatchNumber(1))).await.unwrap(); diff --git a/core/lib/state/src/shadow_storage.rs b/core/lib/state/src/shadow_storage.rs index 9ef1aacca155..5e32f9b25e71 100644 --- a/core/lib/state/src/shadow_storage.rs +++ b/core/lib/state/src/shadow_storage.rs @@ -50,9 +50,9 @@ impl<'a> ShadowStorage<'a> { } impl ReadStorage for ShadowStorage<'_> { - fn read_value(&mut self, &key: &StorageKey) -> StorageValue { - let source_value = self.source_storage.read_value(&key); - let expected_value = self.to_check_storage.read_value(&key); + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + let source_value = self.source_storage.as_mut().read_value(key); + let expected_value = self.to_check_storage.as_mut().read_value(key); if source_value != expected_value { self.metrics.read_value_mismatch.inc(); tracing::error!( @@ -65,8 +65,8 @@ impl ReadStorage for ShadowStorage<'_> { } fn is_write_initial(&mut self, key: &StorageKey) -> bool { - let source_value = self.source_storage.is_write_initial(key); - let expected_value = self.to_check_storage.is_write_initial(key); + let source_value = self.source_storage.as_mut().is_write_initial(key); + let expected_value = self.to_check_storage.as_mut().is_write_initial(key); if source_value != expected_value { self.metrics.is_write_initial_mismatch.inc(); tracing::error!( @@ -93,18 +93,16 @@ impl ReadStorage for ShadowStorage<'_> { } fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { - let source_value = self.source_storage.get_enumeration_index(key); - let expected_value = self.to_check_storage.get_enumeration_index(key); + let source_value = self.source_storage.as_mut().get_enumeration_index(key); + let expected_value = self.to_check_storage.as_mut().get_enumeration_index(key); if source_value != expected_value { tracing::error!( - "get_enumeration_index({:?}) -- l1_batch_number={:?} -- expected source={:?} to be equal to \ - to_check={:?}", key, self.l1_batch_number, source_value, expected_value + "get_enumeration_index({key:?}) -- l1_batch_number={:?} -- \ + expected source={source_value:?} to be equal to to_check={expected_value:?}", + self.l1_batch_number ); - self.metrics.get_enumeration_index_mismatch.inc(); } source_value } } - -// TODO: Add unit tests when we swap metrics crate; blocked by: https://linear.app/matterlabs/issue/QIT-3/rework-metrics-approach diff --git a/core/lib/state/src/storage_factory.rs b/core/lib/state/src/storage_factory.rs index 9f161cbeedf8..307fa465a7c9 100644 --- a/core/lib/state/src/storage_factory.rs +++ b/core/lib/state/src/storage_factory.rs @@ -30,7 +30,7 @@ pub trait ReadStorageFactory: Debug + Send + Sync + 'static { #[derive(Debug, Clone)] pub struct BatchDiff { /// Storage slots touched by this batch along with new values there. 
- pub state_diff: HashMap, + pub state_diff: HashMap, /// Initial write indices introduced by this batch. pub enum_index_diff: HashMap, /// Factory dependencies introduced by this batch. @@ -140,11 +140,12 @@ impl<'a> PgOrRocksdbStorage<'a> { impl ReadStorage for RocksdbWithMemory { fn read_value(&mut self, key: &StorageKey) -> StorageValue { + let hashed_key = key.hashed_key(); match self .batch_diffs .iter() .rev() - .find_map(|b| b.state_diff.get(key)) + .find_map(|b| b.state_diff.get(&hashed_key)) { None => self.rocksdb.read_value(key), Some(value) => *value, diff --git a/core/lib/state/src/storage_overrides.rs b/core/lib/state/src/storage_overrides.rs new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/core/lib/state/src/storage_view.rs b/core/lib/state/src/storage_view.rs index 83d558887654..22bcdacbc547 100644 --- a/core/lib/state/src/storage_view.rs +++ b/core/lib/state/src/storage_view.rs @@ -45,17 +45,35 @@ pub struct StorageView { storage_handle: S, // Used for caching and to get the list/count of modified keys modified_storage_keys: HashMap, + cache: StorageViewCache, + metrics: StorageViewMetrics, +} + +/// `StorageViewCache` is a struct for caching storage reads and `contains_key()` checks. +#[derive(Debug, Default, Clone)] +pub struct StorageViewCache { // Used purely for caching read_storage_keys: HashMap, // Cache for `contains_key()` checks. The cache is only valid within one L1 batch execution. - initial_writes_cache: HashMap, - metrics: StorageViewMetrics, + initial_writes: HashMap, +} + +impl StorageViewCache { + /// Returns the read storage keys. + pub fn read_storage_keys(&self) -> HashMap { + self.read_storage_keys.clone() + } + + /// Returns the initial writes. + pub fn initial_writes(&self) -> HashMap { + self.initial_writes.clone() + } } impl StorageView { - /// Returns the modified storage keys - pub fn modified_storage_keys(&self) -> &HashMap { - &self.modified_storage_keys + /// Returns the underlying storage cache. + pub fn cache(&self) -> StorageViewCache { + self.cache.clone() } } @@ -90,8 +108,10 @@ impl StorageView { Self { storage_handle, modified_storage_keys: HashMap::new(), - read_storage_keys: HashMap::new(), - initial_writes_cache: HashMap::new(), + cache: StorageViewCache { + read_storage_keys: HashMap::new(), + initial_writes: HashMap::new(), + }, metrics: StorageViewMetrics::default(), } } @@ -102,10 +122,10 @@ impl StorageView { let cached_value = self .modified_storage_keys .get(key) - .or_else(|| self.read_storage_keys.get(key)); + .or_else(|| self.cache.read_storage_keys.get(key)); cached_value.copied().unwrap_or_else(|| { let value = self.storage_handle.read_value(key); - self.read_storage_keys.insert(*key, value); + self.cache.read_storage_keys.insert(*key, value); self.metrics.time_spent_on_storage_missed += started_at.elapsed(); self.metrics.storage_invocations_missed += 1; value @@ -114,8 +134,8 @@ impl StorageView { fn cache_size(&self) -> usize { self.modified_storage_keys.len() * mem::size_of::<(StorageKey, StorageValue)>() - + self.initial_writes_cache.len() * mem::size_of::<(StorageKey, bool)>() - + self.read_storage_keys.len() * mem::size_of::<(StorageKey, StorageValue)>() + + self.cache.initial_writes.len() * mem::size_of::<(StorageKey, bool)>() + + self.cache.read_storage_keys.len() * mem::size_of::<(StorageKey, StorageValue)>() } /// Returns the current metrics. @@ -153,11 +173,11 @@ impl ReadStorage for StorageView { /// Only keys contained in the underlying storage will return `false`. 
If a key was
     /// inserted using [`Self::set_value()`], it will still return `true`.
     fn is_write_initial(&mut self, key: &StorageKey) -> bool {
-        if let Some(&is_write_initial) = self.initial_writes_cache.get(key) {
+        if let Some(&is_write_initial) = self.cache.initial_writes.get(key) {
             is_write_initial
         } else {
             let is_write_initial = self.storage_handle.is_write_initial(key);
-            self.initial_writes_cache.insert(*key, is_write_initial);
+            self.cache.initial_writes.insert(*key, is_write_initial);
             is_write_initial
         }
     }
@@ -173,7 +193,7 @@ impl WriteStorage for StorageView {
     fn read_storage_keys(&self) -> &HashMap<StorageKey, StorageValue> {
-        &self.read_storage_keys
+        &self.cache.read_storage_keys
     }
 
     fn set_value(&mut self, key: StorageKey, value: StorageValue) -> StorageValue {
diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs
index 52febc5040ee..1d1731bf0015 100644
--- a/core/lib/state/src/test_utils.rs
+++ b/core/lib/state/src/test_utils.rs
@@ -118,6 +118,7 @@ pub(crate) async fn create_l1_batch(
     let mut written_keys: Vec<_> = logs_for_initial_writes.iter().map(|log| log.key).collect();
     written_keys.sort_unstable();
+    let written_keys: Vec<_> = written_keys.iter().map(StorageKey::hashed_key).collect();
     conn.storage_logs_dedup_dal()
         .insert_initial_writes(l1_batch_number, &written_keys)
         .await
@@ -154,6 +155,7 @@ pub(crate) async fn prepare_postgres_for_snapshot_recovery(
         .unwrap();
     let mut written_keys: Vec<_> = snapshot_storage_logs.iter().map(|log| log.key).collect();
     written_keys.sort_unstable();
+    let written_keys: Vec<_> = written_keys.iter().map(StorageKey::hashed_key).collect();
     conn.storage_logs_dedup_dal()
         .insert_initial_writes(snapshot_recovery.l1_batch_number, &written_keys)
         .await
diff --git a/core/lib/state/src/witness.rs b/core/lib/state/src/witness.rs
new file mode 100644
index 000000000000..5965f3c11884
--- /dev/null
+++ b/core/lib/state/src/witness.rs
@@ -0,0 +1,44 @@
+use zksync_types::{witness_block_state::WitnessStorageState, StorageKey, StorageValue, H256};
+
+use crate::ReadStorage;
+
+/// [`ReadStorage`] implementation backed by binary serialized [`WitnessStorageState`].
+/// Note that `load_factory_dep` is not used: factory deps data is used directly
+/// inside the witness generator and is loaded together with the blob.
+#[derive(Debug)]
+pub struct WitnessStorage {
+    storage_state: WitnessStorageState,
+}
+
+impl WitnessStorage {
+    /// Creates a new storage with the provided witness's block state.
+ pub fn new(storage_state: WitnessStorageState) -> Self { + Self { storage_state } + } +} + +impl ReadStorage for WitnessStorage { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + self.storage_state + .read_storage_key + .get(key) + .copied() + .unwrap_or_default() + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.storage_state + .is_write_initial + .get(key) + .copied() + .unwrap_or_default() + } + + fn load_factory_dep(&mut self, _hash: H256) -> Option<Vec<u8>> { + unreachable!("Factory deps should not be used in the witness storage") + } + + fn get_enumeration_index(&mut self, _key: &StorageKey) -> Option<u64> { + unreachable!("Enumeration index should not be used in the witness storage") + } +} diff --git a/core/lib/storage/Cargo.toml b/core/lib/storage/Cargo.toml index b04b4524ddd7..8c704476ce41 100644 --- a/core/lib/storage/Cargo.toml +++ b/core/lib/storage/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_storage" -version = "0.1.0" +description = "ZKsync RocksDB storage interfaces" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/lib/tee_verifier/Cargo.toml b/core/lib/tee_verifier/Cargo.toml index 9b7230386665..0d50684e165d 100644 --- a/core/lib/tee_verifier/Cargo.toml +++ b/core/lib/tee_verifier/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_tee_verifier" -version = "0.1.0" +description = "ZKsync library for TEE verification" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -9,16 +10,14 @@ license.workspace = true keywords.workspace = true categories.workspace = true -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [dependencies] anyhow.workspace = true -multivm.workspace = true +zksync_multivm.workspace = true serde.workspace = true tracing.workspace = true -vm_utils.workspace = true +zksync_vm_utils.workspace = true zksync_config.workspace = true -zksync_crypto.workspace = true +zksync_crypto_primitives.workspace = true zksync_dal.workspace = true zksync_db_connection.workspace = true zksync_merkle_tree.workspace = true diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 825b833bccc2..32443b60c8ca 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -7,60 +7,37 @@ use std::{cell::RefCell, rc::Rc}; use anyhow::Context; -use multivm::{ - interface::{FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmFactory, VmInterface}, +use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; +use zksync_merkle_tree::{ + BlockOutputWithProofs, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, +}; +use zksync_multivm::{ + interface::{FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface}, vm_latest::HistoryEnabled, VmInstance, }; -use serde::{Deserialize, Serialize}; -use vm_utils::execute_tx; -use zksync_crypto::hasher::blake2::Blake2Hasher; -use zksync_merkle_tree::{ - BlockOutputWithProofs, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, +use zksync_prover_interface::inputs::{ + StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths, }; -use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; -use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata}; use zksync_state::{InMemoryStorage, ReadStorage, StorageView}; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, StorageLog, H256}; use zksync_utils::bytecode::hash_bytecode; -
-/// Version 1 of the data used as input for the TEE verifier. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub struct V1TeeVerifierInput { - prepare_basic_circuits_job: PrepareBasicCircuitsJob, - l2_blocks_execution_data: Vec<L2BlockExecutionData>, - l1_batch_env: L1BatchEnv, - system_env: SystemEnv, - used_contracts: Vec<(H256, Vec<u8>)>, +use zksync_vm_utils::execute_tx; + +/// A structure to hold the result of verification. +pub struct VerificationResult { + /// The root hash of the batch that was verified. + pub value_hash: ValueHash, + /// The batch number that was verified. + pub batch_number: L1BatchNumber, } -/// Data used as input for the TEE verifier. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[non_exhaustive] -#[allow(clippy::large_enum_variant)] -pub enum TeeVerifierInput { - /// `V0` suppresses warning about irrefutable `let...else` pattern - V0, - V1(V1TeeVerifierInput), +/// A trait for the computations that can be verified in TEE. +pub trait Verify { + fn verify(self) -> anyhow::Result<VerificationResult>; } -impl TeeVerifierInput { - pub fn new( - prepare_basic_circuits_job: PrepareBasicCircuitsJob, - l2_blocks_execution_data: Vec<L2BlockExecutionData>, - l1_batch_env: L1BatchEnv, - system_env: SystemEnv, - used_contracts: Vec<(H256, Vec<u8>)>, - ) -> Self { - TeeVerifierInput::V1(V1TeeVerifierInput { - prepare_basic_circuits_job, - l2_blocks_execution_data, - l1_batch_env, - system_env, - used_contracts, - }) - } - +impl Verify for V1TeeVerifierInput { /// Verify that the L1Batch produces the expected root hash /// by executing the VM and verifying the merkle paths of all /// touched storage slots. @@ -69,22 +46,10 @@ impl TeeVerifierInput { /// /// Returns a verbose error of the failure, because any error is /// not actionable. - pub fn verify(self) -> anyhow::Result<()> { - let TeeVerifierInput::V1(V1TeeVerifierInput { - prepare_basic_circuits_job, - l2_blocks_execution_data, - l1_batch_env, - system_env, - used_contracts, - }) = self - else { - tracing::error!("TeeVerifierInput variant not supported"); - anyhow::bail!("TeeVerifierInput variant not supported"); - }; - - let old_root_hash = l1_batch_env.previous_batch_hash.unwrap(); - let l2_chain_id = system_env.chain_id; - let enumeration_index = prepare_basic_circuits_job.next_enumeration_index(); + fn verify(self) -> anyhow::Result<VerificationResult> { + let old_root_hash = self.l1_batch_env.previous_batch_hash.unwrap(); + let l2_chain_id = self.system_env.chain_id; + let enumeration_index = self.witness_input_merkle_paths.next_enumeration_index(); let mut raw_storage = InMemoryStorage::with_custom_system_contracts_and_chain_id( l2_chain_id, @@ -92,206 +57,199 @@ impl TeeVerifierInput { Vec::with_capacity(0), ); - for (hash, bytes) in used_contracts.into_iter() { + for (hash, bytes) in self.used_contracts.into_iter() { tracing::trace!("raw_storage.store_factory_dep({hash}, bytes)"); raw_storage.store_factory_dep(hash, bytes) } let block_output_with_proofs = - Self::get_bowp_and_set_initial_values(prepare_basic_circuits_job, &mut raw_storage); + get_bowp_and_set_initial_values(self.witness_input_merkle_paths, &mut raw_storage); let storage_view = Rc::new(RefCell::new(StorageView::new(&raw_storage))); - let vm = VmInstance::new(l1_batch_env, system_env, storage_view); + let batch_number = self.l1_batch_env.number; + let vm = VmInstance::new(self.l1_batch_env, self.system_env, storage_view); - let vm_out = Self::execute_vm(l2_blocks_execution_data, vm)?; + let vm_out = execute_vm(self.l2_blocks_execution_data, vm)?; let instructions: Vec<TreeInstruction> =
Self::generate_tree_instructions(enumeration_index, &block_output_with_proofs, vm_out)?; + generate_tree_instructions(enumeration_index, &block_output_with_proofs, vm_out)?; block_output_with_proofs .verify_proofs(&Blake2Hasher, old_root_hash, &instructions) .context("Failed to verify_proofs {l1_batch_number} correctly!")?; - Ok(()) + Ok(VerificationResult { + value_hash: block_output_with_proofs.root_hash().unwrap(), + batch_number, + }) } +} - /// Sets the initial storage values and returns `BlockOutputWithProofs` - fn get_bowp_and_set_initial_values( - prepare_basic_circuits_job: PrepareBasicCircuitsJob, - raw_storage: &mut InMemoryStorage, - ) -> BlockOutputWithProofs { - let logs = prepare_basic_circuits_job - .into_merkle_paths() - .map( - |StorageLogMetadata { - root_hash, - merkle_paths, - is_write, - first_write, - leaf_enumeration_index, - value_read, - leaf_hashed_key: leaf_storage_key, - .. - }| { - let root_hash = root_hash.into(); - let merkle_path = merkle_paths.into_iter().map(|x| x.into()).collect(); - let base: TreeLogEntry = match (is_write, first_write, leaf_enumeration_index) { - (false, _, 0) => TreeLogEntry::ReadMissingKey, - (false, _, _) => { - // This is a special U256 here, which needs `to_little_endian` - let mut hashed_key = [0_u8; 32]; - leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), - ); - TreeLogEntry::Read { - leaf_index: leaf_enumeration_index, - value: value_read.into(), - } +/// Sets the initial storage values and returns `BlockOutputWithProofs` +fn get_bowp_and_set_initial_values( + witness_input_merkle_paths: WitnessInputMerklePaths, + raw_storage: &mut InMemoryStorage, +) -> BlockOutputWithProofs { + let logs = witness_input_merkle_paths + .into_merkle_paths() + .map( + |StorageLogMetadata { + root_hash, + merkle_paths, + is_write, + first_write, + leaf_enumeration_index, + value_read, + leaf_hashed_key: leaf_storage_key, + .. 
+ }| { + let root_hash = root_hash.into(); + let merkle_path = merkle_paths.into_iter().map(|x| x.into()).collect(); + let base: TreeLogEntry = match (is_write, first_write, leaf_enumeration_index) { + (false, _, 0) => TreeLogEntry::ReadMissingKey, + (false, _, _) => { + // This is a special U256 here, which needs `to_little_endian` + let mut hashed_key = [0_u8; 32]; + leaf_storage_key.to_little_endian(&mut hashed_key); + raw_storage.set_value_hashed_enum( + hashed_key.into(), + leaf_enumeration_index, + value_read.into(), + ); + TreeLogEntry::Read { + leaf_index: leaf_enumeration_index, + value: value_read.into(), } - (true, true, _) => TreeLogEntry::Inserted, - (true, false, _) => { - // This is a special U256 here, which needs `to_little_endian` - let mut hashed_key = [0_u8; 32]; - leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), - ); - TreeLogEntry::Updated { - leaf_index: leaf_enumeration_index, - previous_value: value_read.into(), - } + } + (true, true, _) => TreeLogEntry::Inserted, + (true, false, _) => { + // This is a special U256 here, which needs `to_little_endian` + let mut hashed_key = [0_u8; 32]; + leaf_storage_key.to_little_endian(&mut hashed_key); + raw_storage.set_value_hashed_enum( + hashed_key.into(), + leaf_enumeration_index, + value_read.into(), + ); + TreeLogEntry::Updated { + leaf_index: leaf_enumeration_index, + previous_value: value_read.into(), } - }; - TreeLogEntryWithProof { - base, - merkle_path, - root_hash, } - }, - ) - .collect(); + }; + TreeLogEntryWithProof { + base, + merkle_path, + root_hash, + } + }, + ) + .collect(); - BlockOutputWithProofs { - logs, - leaf_count: 0, - } + BlockOutputWithProofs { + logs, + leaf_count: 0, } +} - /// Executes the VM and returns `FinishedL1Batch` on success. - fn execute_vm( - l2_blocks_execution_data: Vec, - mut vm: VmInstance, - ) -> anyhow::Result { - let next_l2_blocks_data = l2_blocks_execution_data.iter().skip(1); - - let l2_blocks_data = l2_blocks_execution_data.iter().zip(next_l2_blocks_data); +/// Executes the VM and returns `FinishedL1Batch` on success. 
+fn execute_vm( + l2_blocks_execution_data: Vec<L2BlockExecutionData>, + mut vm: VmInstance, +) -> anyhow::Result<FinishedL1Batch> { + let next_l2_blocks_data = l2_blocks_execution_data.iter().skip(1); - for (l2_block_data, next_l2_block_data) in l2_blocks_data { - tracing::trace!( - "Started execution of l2_block: {:?}, executing {:?} transactions", - l2_block_data.number, - l2_block_data.txs.len(), - ); - for tx in &l2_block_data.txs { - tracing::trace!("Started execution of tx: {tx:?}"); - execute_tx(tx, &mut vm) - .context("failed to execute transaction in TeeVerifierInputProducer")?; - tracing::trace!("Finished execution of tx: {tx:?}"); - } - vm.start_new_l2_block(L2BlockEnv::from_l2_block_data(next_l2_block_data)); + let l2_blocks_data = l2_blocks_execution_data.iter().zip(next_l2_blocks_data); - tracing::trace!("Finished execution of l2_block: {:?}", l2_block_data.number); + for (l2_block_data, next_l2_block_data) in l2_blocks_data { + tracing::trace!( + "Started execution of l2_block: {:?}, executing {:?} transactions", + l2_block_data.number, + l2_block_data.txs.len(), + ); + for tx in &l2_block_data.txs { + tracing::trace!("Started execution of tx: {tx:?}"); + execute_tx(tx, &mut vm) + .context("failed to execute transaction in TeeVerifierInputProducer")?; + tracing::trace!("Finished execution of tx: {tx:?}"); } + vm.start_new_l2_block(L2BlockEnv::from_l2_block_data(next_l2_block_data)); - Ok(vm.finish_batch()) - } - - /// Map `LogQuery` and `TreeLogEntry` to a `TreeInstruction` - fn map_log_tree( - storage_log: &StorageLog, - tree_log_entry: &TreeLogEntry, - idx: &mut u64, - ) -> anyhow::Result<TreeInstruction> { - let key = storage_log.key.hashed_key_u256(); - Ok(match (storage_log.is_write(), *tree_log_entry) { - (true, TreeLogEntry::Updated { leaf_index, .. }) => { - TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) - } - (true, TreeLogEntry::Inserted) => { - let leaf_index = *idx; - *idx += 1; - TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) - } - (false, TreeLogEntry::Read { value, .. }) => { - if storage_log.value != value { - tracing::error!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", - storage_log.value, - value - ); - anyhow::bail!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", - storage_log.value, - value - ); - } - TreeInstruction::Read(key) - } - (false, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key), - _ => { - tracing::error!("Failed to map LogQuery to TreeInstruction"); - anyhow::bail!("Failed to map LogQuery to TreeInstruction"); - } - }) + tracing::trace!("Finished execution of l2_block: {:?}", l2_block_data.number); }
- fn generate_tree_instructions( - mut idx: u64, - bowp: &BlockOutputWithProofs, - vm_out: FinishedL1Batch, - ) -> anyhow::Result<Vec<TreeInstruction>> { - vm_out - .final_execution_state - .deduplicated_storage_logs - .into_iter() - .zip(bowp.logs.iter()) - .map(|(log_query, tree_log_entry)| { - Self::map_log_tree(&log_query, &tree_log_entry.base, &mut idx) - }) - .collect::<Result<Vec<_>, _>>() - } } -impl StoredObject for TeeVerifierInput { - const BUCKET: Bucket = Bucket::TeeVerifierInput; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("tee_verifier_input_for_l1_batch_{key}.bin") - } +/// Map `LogQuery` and `TreeLogEntry` to a `TreeInstruction` +fn map_log_tree( + storage_log: &StorageLog, + tree_log_entry: &TreeLogEntry, + idx: &mut u64, +) -> anyhow::Result<TreeInstruction> { + let key = storage_log.key.hashed_key_u256(); + Ok(match (storage_log.is_write(), *tree_log_entry) { + (true, TreeLogEntry::Updated { leaf_index, .. }) => { + TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) + } + (true, TreeLogEntry::Inserted) => { + let leaf_index = *idx; + *idx += 1; + TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) + } + (false, TreeLogEntry::Read { value, .. }) => { + if storage_log.value != value { + tracing::error!( + "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", + storage_log.value, + value + ); + anyhow::bail!( + "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", + storage_log.value, + value + ); + } + TreeInstruction::Read(key) + } + (false, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key), + _ => { + tracing::error!("Failed to map LogQuery to TreeInstruction"); + anyhow::bail!("Failed to map LogQuery to TreeInstruction"); + } + }) +} - serialize_using_bincode!(); +/// Generates the `TreeInstruction`s from the VM executions.
+fn generate_tree_instructions( + mut idx: u64, + bowp: &BlockOutputWithProofs, + vm_out: FinishedL1Batch, +) -> anyhow::Result<Vec<TreeInstruction>> { + vm_out + .final_execution_state + .deduplicated_storage_logs + .into_iter() + .zip(bowp.logs.iter()) + .map(|(log_query, tree_log_entry)| map_log_tree(&log_query, &tree_log_entry.base, &mut idx)) + .collect::<Result<Vec<_>, _>>() } #[cfg(test)] mod tests { - use multivm::interface::TxExecutionMode; use zksync_basic_types::U256; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; + use zksync_multivm::interface::{L1BatchEnv, SystemEnv, TxExecutionMode}; + use zksync_object_store::StoredObject; + use zksync_prover_interface::inputs::TeeVerifierInput; use super::*; #[test] fn test_v1_serialization() { - let tvi = TeeVerifierInput::new( - PrepareBasicCircuitsJob::new(0), + let tvi = V1TeeVerifierInput::new( + WitnessInputMerklePaths::new(0), vec![], L1BatchEnv { previous_batch_hash: Some(H256([1; 32])), @@ -327,7 +285,7 @@ mod tests { }, vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], ); - + let tvi = TeeVerifierInput::new(tvi); let serialized = <TeeVerifierInput as StoredObject>::serialize(&tvi) .expect("Failed to serialize TeeVerifierInput."); let deserialized: TeeVerifierInput = diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index a562cccacbc1..c80f304a75a6 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_types" -version = "0.1.0" +description = "Shared ZKsync types" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -27,12 +28,14 @@ once_cell.workspace = true rlp.workspace = true serde.workspace = true serde_json.workspace = true +bigdecimal.workspace = true strum = { workspace = true, features = ["derive"] } thiserror.workspace = true num_enum.workspace = true hex.workspace = true prost.workspace = true itertools.workspace = true +tracing.workspace = true # Crypto stuff secp256k1.workspace = true diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index ce21a754c7aa..751de9bd7040 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -1,5 +1,6 @@ use chrono::{DateTime, Utc}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use serde_json::Value; use strum::Display; use zksync_basic_types::{ web3::{AccessList, Bytes, Index}, @@ -17,6 +18,7 @@ use crate::{ }; pub mod en; +pub mod state_override; /// Block Number #[derive(Copy, Clone, Debug, PartialEq, Display)] @@ -27,6 +29,8 @@ pub enum BlockNumber { Finalized, /// Latest sealed block Latest, + /// Last block that was committed on L1 + L1Committed, /// Earliest block (genesis) Earliest, /// Latest block (may be the block that is currently open).
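The new `L1Committed` variant gives RPC clients a block alias that tracks what has been committed on L1, alongside the existing `committed`/`finalized`/`latest` aliases. A minimal round-trip sketch of the intended wire format (illustrative only; it assumes the `zksync_types::api` path and a `serde_json` dependency, neither of which is part of this diff):

use zksync_types::api::BlockNumber;

fn main() {
    // The manual `Serialize` impl (next hunk) emits a snake_case string tag...
    let tag = serde_json::to_string(&BlockNumber::L1Committed).unwrap();
    assert_eq!(tag, r#""l1_committed""#);

    // ...and the matching `Deserialize` arm turns the tag back into the variant.
    let parsed: BlockNumber = serde_json::from_str(r#""l1_committed""#).unwrap();
    assert_eq!(parsed, BlockNumber::L1Committed);
}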
@@ -51,6 +55,7 @@ impl Serialize for BlockNumber { BlockNumber::Committed => serializer.serialize_str("committed"), BlockNumber::Finalized => serializer.serialize_str("finalized"), BlockNumber::Latest => serializer.serialize_str("latest"), + BlockNumber::L1Committed => serializer.serialize_str("l1_committed"), BlockNumber::Earliest => serializer.serialize_str("earliest"), BlockNumber::Pending => serializer.serialize_str("pending"), } @@ -73,6 +78,7 @@ impl<'de> Deserialize<'de> for BlockNumber { "committed" => BlockNumber::Committed, "finalized" => BlockNumber::Finalized, "latest" => BlockNumber::Latest, + "l1_committed" => BlockNumber::L1Committed, "earliest" => BlockNumber::Earliest, "pending" => BlockNumber::Pending, num => { @@ -438,6 +444,9 @@ pub struct Log { pub log_type: Option<String>, /// Removed pub removed: Option<bool>, + /// L2 block timestamp + #[serde(rename = "blockTimestamp")] + pub block_timestamp: Option<U64>, } impl Log { @@ -817,6 +826,14 @@ pub struct ApiStorageLog { pub written_value: U256, } +/// Raw transaction execution data. +/// Data is taken from `TransactionExecutionMetrics`. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionExecutionInfo { + pub execution_info: Value, +} + #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/types/src/api/state_override.rs b/core/lib/types/src/api/state_override.rs new file mode 100644 index 000000000000..a2497a65c533 --- /dev/null +++ b/core/lib/types/src/api/state_override.rs @@ -0,0 +1,208 @@ +use std::collections::HashMap; + +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use zksync_basic_types::{web3::Bytes, H256, U256}; +use zksync_utils::bytecode::{hash_bytecode, validate_bytecode, InvalidBytecodeError}; + +use crate::Address; + +/// Collection of overridden accounts. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct StateOverride(HashMap<Address, OverrideAccount>); + +impl StateOverride { + /// Wraps the provided account overrides. + pub fn new(state: HashMap<Address, OverrideAccount>) -> Self { + Self(state) + } + + /// Gets overrides for the specified account. + pub fn get(&self, address: &Address) -> Option<&OverrideAccount> { + self.0.get(address) + } + + /// Iterates over all account overrides. + pub fn iter(&self) -> impl Iterator<Item = (&Address, &OverrideAccount)> + '_ { + self.0.iter() + } +} + +/// Serialized bytecode representation. +#[derive(Debug, Clone, PartialEq)] +pub struct Bytecode(Bytes); + +impl Bytecode { + pub fn new(bytes: Vec<u8>) -> Result<Self, InvalidBytecodeError> { + validate_bytecode(&bytes)?; + Ok(Self(Bytes(bytes))) + } + + /// Returns the canonical hash of this bytecode. + pub fn hash(&self) -> H256 { + hash_bytecode(&self.0 .0) + } + + /// Converts this bytecode into bytes. + pub fn into_bytes(self) -> Vec<u8> { + self.0 .0 + } +} + +impl Serialize for Bytecode { + fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { + self.0.serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for Bytecode { + fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { + let bytes = Bytes::deserialize(deserializer)?; + validate_bytecode(&bytes.0).map_err(de::Error::custom)?; + Ok(Self(bytes)) + } +} + +/// Account override for `eth_estimateGas`.
+#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[cfg_attr(test, derive(PartialEq))] +#[serde(rename_all = "camelCase")] +pub struct OverrideAccount { + pub balance: Option<U256>, + pub nonce: Option<U256>, + pub code: Option<Bytecode>, + #[serde(flatten, deserialize_with = "state_deserializer")] + pub state: Option<OverrideState>, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(test, derive(PartialEq))] +#[serde(rename_all = "camelCase")] +pub enum OverrideState { + State(HashMap<H256, H256>), + StateDiff(HashMap<H256, H256>), +} + +fn state_deserializer<'de, D>(deserializer: D) -> Result<Option<OverrideState>, D::Error> +where + D: Deserializer<'de>, +{ + let val = serde_json::Value::deserialize(deserializer)?; + let state: Option<HashMap<H256, H256>> = match val.get("state") { + Some(val) => serde_json::from_value(val.clone()).map_err(de::Error::custom)?, + None => None, + }; + let state_diff: Option<HashMap<H256, H256>> = match val.get("stateDiff") { + Some(val) => serde_json::from_value(val.clone()).map_err(de::Error::custom)?, + None => None, + }; + + match (state, state_diff) { + (Some(state), None) => Ok(Some(OverrideState::State(state))), + (None, Some(state_diff)) => Ok(Some(OverrideState::StateDiff(state_diff))), + (None, None) => Ok(None), + _ => Err(de::Error::custom( + "Both 'state' and 'stateDiff' cannot be set simultaneously", + )), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn deserializing_bytecode() { + let bytecode_str = "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"; + let json = serde_json::Value::String(bytecode_str.to_owned()); + let bytecode: Bytecode = serde_json::from_value(json).unwrap(); + assert_ne!(bytecode.hash(), H256::zero()); + let bytecode = bytecode.into_bytes(); + assert_eq!(bytecode.len(), 32); + assert_eq!(bytecode[0], 0x01); + assert_eq!(bytecode[31], 0xef); + } + + #[test] + fn deserializing_invalid_bytecode() { + let invalid_bytecodes = [ + "1234", // not 0x-prefixed + "0x1234", // length not divisible by 32 + "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", // even number of words + ]; + for bytecode_str in invalid_bytecodes { + let json = serde_json::Value::String(bytecode_str.to_owned()); + serde_json::from_value::<Bytecode>(json).unwrap_err(); + } + + let long_bytecode = String::from("0x") + + &"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef".repeat(65_537); + let json = serde_json::Value::String(long_bytecode); + serde_json::from_value::<Bytecode>(json).unwrap_err(); + } + + #[test] + fn deserializing_state_override() { + let json = serde_json::json!({ + "0x0123456789abcdef0123456789abcdef01234567": { + "balance": "0x123", + "nonce": "0x1", + }, + "0x123456789abcdef0123456789abcdef012345678": { + "stateDiff": { + "0x0000000000000000000000000000000000000000000000000000000000000000": + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000001": + "0x0000000000000000000000000000000000000000000000000000000000000002", + } + } + }); + + let state_override: StateOverride = serde_json::from_value(json).unwrap(); + assert_eq!(state_override.0.len(), 2); + + let first_address: Address = "0x0123456789abcdef0123456789abcdef01234567" + .parse() + .unwrap(); + let first_override = &state_override.0[&first_address]; + assert_eq!( + *first_override, + OverrideAccount { + balance: Some(0x123.into()), + nonce: Some(1.into()), + ..OverrideAccount::default() + } + ); + + let second_address: Address = "0x123456789abcdef0123456789abcdef012345678"
+ .parse() + .unwrap(); + let second_override = &state_override.0[&second_address]; + assert_eq!( + *second_override, + OverrideAccount { + state: Some(OverrideState::StateDiff(HashMap::from([ + (H256::from_low_u64_be(0), H256::from_low_u64_be(1)), + (H256::from_low_u64_be(1), H256::from_low_u64_be(2)), + ]))), + ..OverrideAccount::default() + } + ); + } + + #[test] + fn deserializing_bogus_account_override() { + let json = serde_json::json!({ + "state": { + "0x0000000000000000000000000000000000000000000000000000000000000001": + "0x0000000000000000000000000000000000000000000000000000000000000002", + }, + "stateDiff": { + "0x0000000000000000000000000000000000000000000000000000000000000000": + "0x0000000000000000000000000000000000000000000000000000000000000001", + }, + }); + let err = serde_json::from_value::<OverrideAccount>(json).unwrap_err(); + assert!(err.to_string().contains("'state' and 'stateDiff'"), "{err}"); + } +} diff --git a/core/lib/types/src/base_token_ratio.rs b/core/lib/types/src/base_token_ratio.rs new file mode 100644 index 000000000000..019a84dcb706 --- /dev/null +++ b/core/lib/types/src/base_token_ratio.rs @@ -0,0 +1,32 @@ +use std::num::NonZeroU64; + +use chrono::{DateTime, Utc}; + +/// Represents the base token to ETH conversion ratio at a given point in time. +#[derive(Debug, Clone)] +pub struct BaseTokenRatio { + pub id: u32, + pub ratio_timestamp: DateTime, + pub numerator: NonZeroU64, + pub denominator: NonZeroU64, + pub used_in_l1: bool, +} + +/// Struct to represent API response containing denominator, numerator, and timestamp. +#[derive(Debug, Clone, Copy, PartialEq)] +pub struct BaseTokenAPIRatio { + pub numerator: NonZeroU64, + pub denominator: NonZeroU64, + // Either the timestamp of the quote or the timestamp of the request. + pub ratio_timestamp: DateTime, +} + +impl Default for BaseTokenAPIRatio { + fn default() -> Self { + Self { + numerator: NonZeroU64::new(1).unwrap(), + denominator: NonZeroU64::new(1).unwrap(), + ratio_timestamp: Utc::now(), + } + } +} diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 221b9b4d63ff..bc13bed457bf 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -30,6 +30,14 @@ impl DeployedContract { } } +/// Holder for L1 batch data, used in eth sender metrics. +pub struct L1BatchStatistics { + pub number: L1BatchNumber, + pub timestamp: u64, + pub l2_tx_count: u32, + pub l1_tx_count: u32, +} + /// Holder for the block metadata that is not available from transactions themselves.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct L1BatchHeader { diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index 61c2d7b5ea27..63d1bad486f3 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -536,7 +536,7 @@ pub struct L1BatchCommitment { pub meta_parameters: L1BatchMetaParameters, } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Default, Debug, Clone, PartialEq, Eq)] #[cfg_attr(test, derive(Serialize, Deserialize))] pub struct L1BatchCommitmentHash { pub pass_through_data: H256, @@ -720,7 +720,7 @@ impl CommitmentInput { } } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct L1BatchCommitmentArtifacts { pub commitment_hash: L1BatchCommitmentHash, pub l2_l1_merkle_root: H256, diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json index ab260f4011de..c5eccbce038a 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json @@ -81,38 +81,9 @@ "address": "0x000000000000000000000000000000000000800a", "key": "0x1", "derived_key": [ - 113, - 233, - 23, - 33, - 249, - 145, - 133, - 118, - 215, - 96, - 240, - 47, - 3, - 202, - 196, - 124, - 111, - 64, - 3, - 49, - 96, - 49, - 132, - 142, - 60, - 29, - 153, - 230, - 232, - 58, - 71, - 67 + 113, 233, 23, 33, 249, 145, 133, 118, 215, 96, 240, 47, 3, 202, 196, + 124, 111, 64, 3, 49, 96, 49, 132, 142, 60, 29, 153, 230, 232, 58, + 71, 67 ], "enumeration_index": 49, "initial_value": "0x18776f28c303800", @@ -122,38 +93,9 @@ "address": "0x000000000000000000000000000000000000800a", "key": "0x294a00337abeee2b3cd948ffeed92231e2a3acc2eb11210400e0aa9557f23e26", "derived_key": [ - 45, - 90, - 105, - 98, - 204, - 206, - 229, - 212, - 173, - 180, - 138, - 54, - 187, - 191, - 68, - 58, - 83, - 23, - 33, - 72, - 67, - 129, - 18, - 89, - 55, - 243, - 0, - 26, - 197, - 255, - 135, - 91 + 45, 90, 105, 98, 204, 206, 229, 212, 173, 180, 138, 54, 187, 191, + 68, 58, 83, 23, 33, 72, 67, 129, 18, 89, 55, 243, 0, 26, 197, 255, + 135, 91 ], "enumeration_index": 50, "initial_value": "0xf5559e28fd66c0", @@ -163,38 +105,9 @@ "address": "0x000000000000000000000000000000000000800a", "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", "derived_key": [ - 141, - 97, - 126, - 192, - 90, - 203, - 191, - 95, - 226, - 69, - 41, - 166, - 75, - 35, - 133, - 169, - 106, - 173, - 67, - 240, - 155, - 225, - 173, - 169, - 44, - 112, - 64, - 49, - 220, - 193, - 72, - 27 + 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, 166, 75, 35, 133, + 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, 64, 49, 220, + 193, 72, 27 ], "enumeration_index": 0, "initial_value": "0x0", @@ -204,38 +117,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0x7", "derived_key": [ - 18, - 59, - 175, - 197, - 134, - 247, - 119, - 100, - 72, - 140, - 210, - 76, - 106, - 119, - 84, - 110, - 90, - 15, - 232, - 189, - 251, - 79, - 162, - 3, - 207, - 175, - 252, - 54, - 204, - 228, - 221, - 91 + 18, 59, 175, 197, 134, 247, 119, 100, 72, 140, 210, 76, 106, 119, + 84, 110, 90, 15, 232, 189, 251, 79, 162, 3, 207, 175, 252, 54, 204, + 228, 221, 91 ], "enumeration_index": 53, "initial_value": "0x100000000000000000000000065c22e3e", @@ -245,38 +129,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0x9", "derived_key": [ - 142, - 125, - 
208, - 106, - 197, - 183, - 59, - 71, - 59, - 230, - 188, - 90, - 81, - 3, - 15, - 76, - 116, - 55, - 101, - 124, - 183, - 178, - 155, - 243, - 118, - 197, - 100, - 184, - 209, - 103, - 90, - 94 + 142, 125, 208, 106, 197, 183, 59, 71, 59, 230, 188, 90, 81, 3, 15, + 76, 116, 55, 101, 124, 183, 178, 155, 243, 118, 197, 100, 184, 209, + 103, 90, 94 ], "enumeration_index": 54, "initial_value": "0x200000000000000000000000065c22e3f", @@ -286,38 +141,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0xd", "derived_key": [ - 235, - 221, - 239, - 221, - 164, - 142, - 178, - 170, - 127, - 102, - 236, - 247, - 148, - 10, - 40, - 14, - 158, - 243, - 251, - 46, - 149, - 219, - 9, - 149, - 83, - 132, - 64, - 166, - 42, - 247, - 152, - 97 + 235, 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, + 40, 14, 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, + 247, 152, 97 ], "enumeration_index": 0, "initial_value": "0x0", @@ -327,38 +153,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0xe", "derived_key": [ - 70, - 64, - 215, - 56, - 69, - 54, - 78, - 198, - 145, - 246, - 222, - 251, - 96, - 106, - 58, - 114, - 253, - 165, - 215, - 173, - 51, - 209, - 125, - 4, - 153, - 90, - 142, - 37, - 44, - 74, - 6, - 216 + 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, 58, + 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216 ], "enumeration_index": 0, "initial_value": "0x0", @@ -368,38 +165,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0x10c", "derived_key": [ - 121, - 9, - 53, - 136, - 208, - 232, - 71, - 239, - 167, - 58, - 16, - 206, - 32, - 228, - 121, - 159, - 177, - 228, - 102, - 66, - 214, - 86, - 23, - 199, - 229, - 33, - 63, - 160, - 73, - 137, - 217, - 45 + 121, 9, 53, 136, 208, 232, 71, 239, 167, 58, 16, 206, 32, 228, 121, + 159, 177, 228, 102, 66, 214, 86, 23, 199, 229, 33, 63, 160, 73, 137, + 217, 45 ], "enumeration_index": 57, "initial_value": "0x200000000000000000000000065c22e3f", @@ -409,38 +177,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0xad67d757c34507f157cacfa2e3153e9f260a2244f30428821be7be64587ac55f", "derived_key": [ - 12, - 194, - 74, - 180, - 47, - 190, - 197, - 49, - 125, - 155, - 26, - 44, - 164, - 124, - 169, - 185, - 59, - 158, - 195, - 109, - 121, - 142, - 253, - 124, - 218, - 167, - 57, - 36, - 22, - 48, - 203, - 70 + 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, 44, 164, 124, 169, + 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, 57, 36, 22, + 48, 203, 70 ], "enumeration_index": 0, "initial_value": "0x0", @@ -451,7 +190,10 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": ["0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000"] + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] } }, "pass_through_data": { @@ -480,347 +222,40 @@ }, "system_logs_linear_hash": "0x3fc3c24217a2f1e09715eb3fa07327bec6818799a847175174ae027525519eb6", "state_diffs_compressed": [ - 1, - 0, - 1, - 72, - 4, - 0, - 4, - 141, - 97, - 126, - 192, - 90, - 203, - 191, - 95, - 226, - 69, - 41, - 166, - 75, - 35, - 133, - 169, - 106, - 173, - 67, - 240, - 155, - 225, - 173, - 
169, - 44, - 112, - 64, - 49, - 220, - 193, - 72, - 27, - 65, - 111, - 5, - 225, - 147, - 53, - 50, - 134, - 160, - 235, - 221, - 239, - 221, - 164, - 142, - 178, - 170, - 127, - 102, - 236, - 247, - 148, - 10, - 40, - 14, - 158, - 243, - 251, - 46, - 149, - 219, - 9, - 149, - 83, - 132, - 64, - 166, - 42, - 247, - 152, - 97, - 0, - 235, - 190, - 96, - 156, - 211, - 204, - 209, - 31, - 39, - 62, - 185, - 67, - 116, - 214, - 211, - 162, - 247, - 133, - 108, - 95, - 16, - 57, - 220, - 72, - 119, - 198, - 163, - 52, - 24, - 138, - 199, - 193, - 70, - 64, - 215, - 56, - 69, - 54, - 78, - 198, - 145, - 246, - 222, - 251, - 96, - 106, - 58, - 114, - 253, - 165, - 215, - 173, - 51, - 209, - 125, - 4, - 153, - 90, - 142, - 37, - 44, - 74, - 6, - 216, - 0, - 112, - 142, - 127, - 207, - 104, - 235, - 171, - 108, - 135, - 50, - 38, - 134, - 202, - 196, - 188, - 219, - 95, - 43, - 212, - 199, - 31, - 51, - 123, - 24, - 209, - 71, - 253, - 154, - 108, - 68, - 173, - 19, - 12, - 194, - 74, - 180, - 47, - 190, - 197, - 49, - 125, - 155, - 26, - 44, - 164, - 124, - 169, - 185, - 59, - 158, - 195, - 109, - 121, - 142, - 253, - 124, - 218, - 167, - 57, - 36, - 22, - 48, - 203, - 70, - 0, - 85, - 97, - 141, - 181, - 255, - 36, - 174, - 228, - 210, - 54, - 146, - 27, - 111, - 66, - 114, - 16, - 17, - 97, - 19, - 113, - 21, - 163, - 180, - 196, - 166, - 95, - 134, - 119, - 177, - 36, - 192, - 28, - 0, - 0, - 0, - 49, - 65, - 111, - 6, - 45, - 144, - 62, - 129, - 207, - 96, - 0, - 0, - 0, - 50, - 49, - 75, - 253, - 9, - 79, - 72, - 192, - 0, - 0, - 0, - 53, - 137, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 66, - 0, - 0, - 0, - 54, - 137, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 66, - 0, - 0, - 0, - 57, - 137, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 66 + 1, 0, 1, 72, 4, 0, 4, 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, + 166, 75, 35, 133, 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, + 64, 49, 220, 193, 72, 27, 65, 111, 5, 225, 147, 53, 50, 134, 160, 235, + 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, 40, 14, + 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, 247, 152, 97, + 0, 235, 190, 96, 156, 211, 204, 209, 31, 39, 62, 185, 67, 116, 214, 211, + 162, 247, 133, 108, 95, 16, 57, 220, 72, 119, 198, 163, 52, 24, 138, + 199, 193, 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, + 58, 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216, 0, 112, 142, 127, 207, 104, 235, 171, 108, 135, 50, 38, 134, + 202, 196, 188, 219, 95, 43, 212, 199, 31, 51, 123, 24, 209, 71, 253, + 154, 108, 68, 173, 19, 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, + 44, 164, 124, 169, 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, + 57, 36, 22, 48, 203, 70, 0, 85, 97, 141, 181, 255, 36, 174, 228, 210, + 54, 146, 27, 111, 66, 114, 16, 17, 97, 19, 113, 21, 163, 180, 196, 166, + 95, 134, 119, 177, 36, 192, 28, 0, 0, 0, 49, 65, 111, 6, 45, 144, 62, + 129, 207, 96, 0, 0, 0, 50, 49, 75, 253, 9, 79, 72, 192, 0, 0, 0, 53, + 137, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 54, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 57, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66 ], "state_diffs_hash": "0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0", "aux_commitments": { "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", 
"bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": ["0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000"], - "blob_commitments": ["0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000"] + "blob_linear_hashes": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json index 726874949ad8..4983bbeca143 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json @@ -97,38 +97,9 @@ "address": "0x000000000000000000000000000000000000800a", "key": "0x1", "derived_key": [ - 113, - 233, - 23, - 33, - 249, - 145, - 133, - 118, - 215, - 96, - 240, - 47, - 3, - 202, - 196, - 124, - 111, - 64, - 3, - 49, - 96, - 49, - 132, - 142, - 60, - 29, - 153, - 230, - 232, - 58, - 71, - 67 + 113, 233, 23, 33, 249, 145, 133, 118, 215, 96, 240, 47, 3, 202, 196, + 124, 111, 64, 3, 49, 96, 49, 132, 142, 60, 29, 153, 230, 232, 58, + 71, 67 ], "enumeration_index": 49, "initial_value": "0x18776f28c303800", @@ -138,38 +109,9 @@ "address": "0x000000000000000000000000000000000000800a", "key": "0x294a00337abeee2b3cd948ffeed92231e2a3acc2eb11210400e0aa9557f23e26", "derived_key": [ - 45, - 90, - 105, - 98, - 204, - 206, - 229, - 212, - 173, - 180, - 138, - 54, - 187, - 191, - 68, - 58, - 83, - 23, - 33, - 72, - 67, - 129, - 18, - 89, - 55, - 243, - 0, - 26, - 197, - 255, - 135, - 91 + 45, 90, 105, 98, 204, 206, 229, 212, 173, 180, 138, 54, 187, 191, + 68, 58, 83, 23, 33, 72, 67, 129, 18, 89, 55, 243, 0, 26, 197, 255, + 135, 91 ], "enumeration_index": 50, "initial_value": "0xf5559e28fd66c0", @@ -179,38 +121,9 @@ "address": "0x000000000000000000000000000000000000800a", "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", "derived_key": [ - 141, - 97, - 126, - 192, - 90, - 203, - 191, - 95, - 226, - 69, - 41, - 166, - 75, - 35, - 133, - 169, - 106, - 173, - 67, - 240, - 155, - 225, - 173, - 169, - 44, - 112, - 64, - 49, - 220, - 193, - 72, - 27 + 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, 166, 75, 35, 133, + 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, 64, 49, 220, + 193, 72, 27 ], "enumeration_index": 0, "initial_value": "0x0", @@ -220,38 +133,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0x7", "derived_key": [ - 18, - 59, - 175, - 197, - 134, - 247, - 119, - 100, - 72, - 140, - 210, - 76, - 106, - 119, - 84, - 110, - 90, - 15, - 232, - 189, - 251, - 79, - 162, - 3, - 207, - 175, - 252, - 54, - 204, - 228, - 221, - 91 + 18, 59, 175, 197, 134, 247, 119, 100, 72, 140, 210, 76, 106, 119, + 84, 110, 90, 15, 232, 189, 251, 79, 162, 3, 207, 175, 252, 54, 204, + 228, 221, 91 ], "enumeration_index": 53, "initial_value": "0x100000000000000000000000065c22e3e", @@ -261,38 +145,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0x9", "derived_key": [ - 142, - 125, - 208, - 106, - 197, - 183, - 59, - 71, - 59, 
- 230, - 188, - 90, - 81, - 3, - 15, - 76, - 116, - 55, - 101, - 124, - 183, - 178, - 155, - 243, - 118, - 197, - 100, - 184, - 209, - 103, - 90, - 94 + 142, 125, 208, 106, 197, 183, 59, 71, 59, 230, 188, 90, 81, 3, 15, + 76, 116, 55, 101, 124, 183, 178, 155, 243, 118, 197, 100, 184, 209, + 103, 90, 94 ], "enumeration_index": 54, "initial_value": "0x200000000000000000000000065c22e3f", @@ -302,38 +157,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0xd", "derived_key": [ - 235, - 221, - 239, - 221, - 164, - 142, - 178, - 170, - 127, - 102, - 236, - 247, - 148, - 10, - 40, - 14, - 158, - 243, - 251, - 46, - 149, - 219, - 9, - 149, - 83, - 132, - 64, - 166, - 42, - 247, - 152, - 97 + 235, 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, + 40, 14, 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, + 247, 152, 97 ], "enumeration_index": 0, "initial_value": "0x0", @@ -343,38 +169,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0xe", "derived_key": [ - 70, - 64, - 215, - 56, - 69, - 54, - 78, - 198, - 145, - 246, - 222, - 251, - 96, - 106, - 58, - 114, - 253, - 165, - 215, - 173, - 51, - 209, - 125, - 4, - 153, - 90, - 142, - 37, - 44, - 74, - 6, - 216 + 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, 58, + 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216 ], "enumeration_index": 0, "initial_value": "0x0", @@ -384,38 +181,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0x10c", "derived_key": [ - 121, - 9, - 53, - 136, - 208, - 232, - 71, - 239, - 167, - 58, - 16, - 206, - 32, - 228, - 121, - 159, - 177, - 228, - 102, - 66, - 214, - 86, - 23, - 199, - 229, - 33, - 63, - 160, - 73, - 137, - 217, - 45 + 121, 9, 53, 136, 208, 232, 71, 239, 167, 58, 16, 206, 32, 228, 121, + 159, 177, 228, 102, 66, 214, 86, 23, 199, 229, 33, 63, 160, 73, 137, + 217, 45 ], "enumeration_index": 57, "initial_value": "0x200000000000000000000000065c22e3f", @@ -425,38 +193,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0xad67d757c34507f157cacfa2e3153e9f260a2244f30428821be7be64587ac55f", "derived_key": [ - 12, - 194, - 74, - 180, - 47, - 190, - 197, - 49, - 125, - 155, - 26, - 44, - 164, - 124, - 169, - 185, - 59, - 158, - 195, - 109, - 121, - 142, - 253, - 124, - 218, - 167, - 57, - 36, - 22, - 48, - 203, - 70 + 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, 44, 164, 124, 169, + 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, 57, 36, 22, + 48, 203, 70 ], "enumeration_index": 0, "initial_value": "0x0", @@ -467,7 +206,10 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": ["0x0000000000000000000000000000000000000000000000000000000000000001", "0x0000000000000000000000000000000000000000000000000000000000000002"] + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002" + ] } }, "pass_through_data": { @@ -496,347 +238,40 @@ }, "system_logs_linear_hash": "0xc559d154f69af74a0017e2380afa3a861822cf47bc5b99e3a76f7fc4de6cca09", "state_diffs_compressed": [ - 1, - 0, - 1, - 72, - 4, - 0, - 4, - 141, - 97, - 126, - 192, - 90, - 203, - 191, - 95, - 226, - 69, - 41, - 166, - 75, - 35, - 133, - 169, - 106, - 173, - 67, - 240, - 155, - 225, - 173, - 169, - 44, - 112, - 64, - 49, - 220, - 193, - 
72, - 27, - 65, - 111, - 5, - 225, - 147, - 53, - 50, - 134, - 160, - 235, - 221, - 239, - 221, - 164, - 142, - 178, - 170, - 127, - 102, - 236, - 247, - 148, - 10, - 40, - 14, - 158, - 243, - 251, - 46, - 149, - 219, - 9, - 149, - 83, - 132, - 64, - 166, - 42, - 247, - 152, - 97, - 0, - 235, - 190, - 96, - 156, - 211, - 204, - 209, - 31, - 39, - 62, - 185, - 67, - 116, - 214, - 211, - 162, - 247, - 133, - 108, - 95, - 16, - 57, - 220, - 72, - 119, - 198, - 163, - 52, - 24, - 138, - 199, - 193, - 70, - 64, - 215, - 56, - 69, - 54, - 78, - 198, - 145, - 246, - 222, - 251, - 96, - 106, - 58, - 114, - 253, - 165, - 215, - 173, - 51, - 209, - 125, - 4, - 153, - 90, - 142, - 37, - 44, - 74, - 6, - 216, - 0, - 112, - 142, - 127, - 207, - 104, - 235, - 171, - 108, - 135, - 50, - 38, - 134, - 202, - 196, - 188, - 219, - 95, - 43, - 212, - 199, - 31, - 51, - 123, - 24, - 209, - 71, - 253, - 154, - 108, - 68, - 173, - 19, - 12, - 194, - 74, - 180, - 47, - 190, - 197, - 49, - 125, - 155, - 26, - 44, - 164, - 124, - 169, - 185, - 59, - 158, - 195, - 109, - 121, - 142, - 253, - 124, - 218, - 167, - 57, - 36, - 22, - 48, - 203, - 70, - 0, - 85, - 97, - 141, - 181, - 255, - 36, - 174, - 228, - 210, - 54, - 146, - 27, - 111, - 66, - 114, - 16, - 17, - 97, - 19, - 113, - 21, - 163, - 180, - 196, - 166, - 95, - 134, - 119, - 177, - 36, - 192, - 28, - 0, - 0, - 0, - 49, - 65, - 111, - 6, - 45, - 144, - 62, - 129, - 207, - 96, - 0, - 0, - 0, - 50, - 49, - 75, - 253, - 9, - 79, - 72, - 192, - 0, - 0, - 0, - 53, - 137, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 66, - 0, - 0, - 0, - 54, - 137, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 66, - 0, - 0, - 0, - 57, - 137, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 66 + 1, 0, 1, 72, 4, 0, 4, 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, + 166, 75, 35, 133, 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, + 64, 49, 220, 193, 72, 27, 65, 111, 5, 225, 147, 53, 50, 134, 160, 235, + 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, 40, 14, + 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, 247, 152, 97, + 0, 235, 190, 96, 156, 211, 204, 209, 31, 39, 62, 185, 67, 116, 214, 211, + 162, 247, 133, 108, 95, 16, 57, 220, 72, 119, 198, 163, 52, 24, 138, + 199, 193, 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, + 58, 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216, 0, 112, 142, 127, 207, 104, 235, 171, 108, 135, 50, 38, 134, + 202, 196, 188, 219, 95, 43, 212, 199, 31, 51, 123, 24, 209, 71, 253, + 154, 108, 68, 173, 19, 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, + 44, 164, 124, 169, 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, + 57, 36, 22, 48, 203, 70, 0, 85, 97, 141, 181, 255, 36, 174, 228, 210, + 54, 146, 27, 111, 66, 114, 16, 17, 97, 19, 113, 21, 163, 180, 196, 166, + 95, 134, 119, 177, 36, 192, 28, 0, 0, 0, 49, 65, 111, 6, 45, 144, 62, + 129, 207, 96, 0, 0, 0, 50, 49, 75, 253, 9, 79, 72, 192, 0, 0, 0, 53, + 137, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 54, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 57, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66 ], "state_diffs_hash": "0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0", "aux_commitments": { "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": 
"0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": ["0x0000000000000000000000000000000000000000000000000000000000000003", "0x0000000000000000000000000000000000000000000000000000000000000004"], - "blob_commitments": ["0x0000000000000000000000000000000000000000000000000000000000000001", "0x0000000000000000000000000000000000000000000000000000000000000002"] + "blob_linear_hashes": [ + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004" + ], + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002" + ] } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json index 506110c6bcce..59a24b7c90ce 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json @@ -129,38 +129,9 @@ "address": "0x000000000000000000000000000000000000800a", "key": "0x1", "derived_key": [ - 113, - 233, - 23, - 33, - 249, - 145, - 133, - 118, - 215, - 96, - 240, - 47, - 3, - 202, - 196, - 124, - 111, - 64, - 3, - 49, - 96, - 49, - 132, - 142, - 60, - 29, - 153, - 230, - 232, - 58, - 71, - 67 + 113, 233, 23, 33, 249, 145, 133, 118, 215, 96, 240, 47, 3, 202, 196, + 124, 111, 64, 3, 49, 96, 49, 132, 142, 60, 29, 153, 230, 232, 58, + 71, 67 ], "enumeration_index": 49, "initial_value": "0x18776f28c303800", @@ -170,38 +141,9 @@ "address": "0x000000000000000000000000000000000000800a", "key": "0x294a00337abeee2b3cd948ffeed92231e2a3acc2eb11210400e0aa9557f23e26", "derived_key": [ - 45, - 90, - 105, - 98, - 204, - 206, - 229, - 212, - 173, - 180, - 138, - 54, - 187, - 191, - 68, - 58, - 83, - 23, - 33, - 72, - 67, - 129, - 18, - 89, - 55, - 243, - 0, - 26, - 197, - 255, - 135, - 91 + 45, 90, 105, 98, 204, 206, 229, 212, 173, 180, 138, 54, 187, 191, + 68, 58, 83, 23, 33, 72, 67, 129, 18, 89, 55, 243, 0, 26, 197, 255, + 135, 91 ], "enumeration_index": 50, "initial_value": "0xf5559e28fd66c0", @@ -211,38 +153,9 @@ "address": "0x000000000000000000000000000000000000800a", "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", "derived_key": [ - 141, - 97, - 126, - 192, - 90, - 203, - 191, - 95, - 226, - 69, - 41, - 166, - 75, - 35, - 133, - 169, - 106, - 173, - 67, - 240, - 155, - 225, - 173, - 169, - 44, - 112, - 64, - 49, - 220, - 193, - 72, - 27 + 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, 166, 75, 35, 133, + 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, 64, 49, 220, + 193, 72, 27 ], "enumeration_index": 0, "initial_value": "0x0", @@ -252,38 +165,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0x7", "derived_key": [ - 18, - 59, - 175, - 197, - 134, - 247, - 119, - 100, - 72, - 140, - 210, - 76, - 106, - 119, - 84, - 110, - 90, - 15, - 232, - 189, - 251, - 79, - 162, - 3, - 207, - 175, - 252, - 54, - 204, - 228, - 221, - 91 + 18, 59, 175, 197, 134, 247, 119, 100, 72, 140, 210, 76, 106, 119, + 84, 110, 90, 15, 232, 189, 251, 79, 162, 3, 207, 175, 252, 54, 204, + 228, 221, 91 ], "enumeration_index": 53, "initial_value": "0x100000000000000000000000065c22e3e", @@ -293,38 +177,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0x9", "derived_key": [ - 142, - 125, - 208, - 106, - 197, - 183, - 59, - 71, - 59, - 230, - 188, - 90, - 81, - 3, - 15, - 
76, - 116, - 55, - 101, - 124, - 183, - 178, - 155, - 243, - 118, - 197, - 100, - 184, - 209, - 103, - 90, - 94 + 142, 125, 208, 106, 197, 183, 59, 71, 59, 230, 188, 90, 81, 3, 15, + 76, 116, 55, 101, 124, 183, 178, 155, 243, 118, 197, 100, 184, 209, + 103, 90, 94 ], "enumeration_index": 54, "initial_value": "0x200000000000000000000000065c22e3f", @@ -334,38 +189,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0xd", "derived_key": [ - 235, - 221, - 239, - 221, - 164, - 142, - 178, - 170, - 127, - 102, - 236, - 247, - 148, - 10, - 40, - 14, - 158, - 243, - 251, - 46, - 149, - 219, - 9, - 149, - 83, - 132, - 64, - 166, - 42, - 247, - 152, - 97 + 235, 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, + 40, 14, 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, + 247, 152, 97 ], "enumeration_index": 0, "initial_value": "0x0", @@ -375,38 +201,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0xe", "derived_key": [ - 70, - 64, - 215, - 56, - 69, - 54, - 78, - 198, - 145, - 246, - 222, - 251, - 96, - 106, - 58, - 114, - 253, - 165, - 215, - 173, - 51, - 209, - 125, - 4, - 153, - 90, - 142, - 37, - 44, - 74, - 6, - 216 + 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, 58, + 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216 ], "enumeration_index": 0, "initial_value": "0x0", @@ -416,38 +213,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0x10c", "derived_key": [ - 121, - 9, - 53, - 136, - 208, - 232, - 71, - 239, - 167, - 58, - 16, - 206, - 32, - 228, - 121, - 159, - 177, - 228, - 102, - 66, - 214, - 86, - 23, - 199, - 229, - 33, - 63, - 160, - 73, - 137, - 217, - 45 + 121, 9, 53, 136, 208, 232, 71, 239, 167, 58, 16, 206, 32, 228, 121, + 159, 177, 228, 102, 66, 214, 86, 23, 199, 229, 33, 63, 160, 73, 137, + 217, 45 ], "enumeration_index": 57, "initial_value": "0x200000000000000000000000065c22e3f", @@ -457,38 +225,9 @@ "address": "0x000000000000000000000000000000000000800b", "key": "0xad67d757c34507f157cacfa2e3153e9f260a2244f30428821be7be64587ac55f", "derived_key": [ - 12, - 194, - 74, - 180, - 47, - 190, - 197, - 49, - 125, - 155, - 26, - 44, - 164, - 124, - 169, - 185, - 59, - 158, - 195, - 109, - 121, - 142, - 253, - 124, - 218, - 167, - 57, - 36, - 22, - 48, - 203, - 70 + 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, 44, 164, 124, 169, + 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, 57, 36, 22, + 48, 203, 70 ], "enumeration_index": 0, "initial_value": "0x0", @@ -499,7 +238,24 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": ["0x0000000000000000000000000000000000000000000000000000000000000001", "0x0000000000000000000000000000000000000000000000000000000000000002", "0x0000000000000000000000000000000000000000000000000000000000000003", "0x0000000000000000000000000000000000000000000000000000000000000004", "0x0000000000000000000000000000000000000000000000000000000000000005", "0x0000000000000000000000000000000000000000000000000000000000000006", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", 
"0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000"] + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] } }, "pass_through_data": { @@ -528,347 +284,68 @@ }, "system_logs_linear_hash": "0x602dacc0a26e3347f0679924c4ae151ff5200e7dd80902fe0fc11c806c4d3ffb", "state_diffs_compressed": [ - 1, - 0, - 1, - 72, - 4, - 0, - 4, - 141, - 97, - 126, - 192, - 90, - 203, - 191, - 95, - 226, - 69, - 41, - 166, - 75, - 35, - 133, - 169, - 106, - 173, - 67, - 240, - 155, - 225, - 173, - 169, - 44, - 112, - 64, - 49, - 220, - 193, - 72, - 27, - 65, - 111, - 5, - 225, - 147, - 53, - 50, - 134, - 160, - 235, - 221, - 239, - 221, - 164, - 142, - 178, - 170, - 127, - 102, - 236, - 247, - 148, - 10, - 40, - 14, - 158, - 243, - 251, - 46, - 149, - 219, - 9, - 149, - 83, - 132, - 64, - 166, - 42, - 247, - 152, - 97, - 0, - 235, - 190, - 96, - 156, - 211, - 204, - 209, - 31, - 39, - 62, - 185, - 67, - 116, - 214, - 211, - 162, - 247, - 133, - 108, - 95, - 16, - 57, - 220, - 72, - 119, - 198, - 163, - 52, - 24, - 138, - 199, - 193, - 70, - 64, - 215, - 56, - 69, - 54, - 78, - 198, - 145, - 246, - 222, - 251, - 96, - 106, - 58, - 114, - 253, - 165, - 215, - 173, - 51, - 209, - 125, - 4, - 153, - 90, - 142, - 37, - 44, - 74, - 6, - 216, - 0, - 112, - 142, - 127, - 207, - 104, - 235, - 171, - 108, - 135, - 50, - 38, - 134, - 202, - 196, - 188, - 219, - 95, - 43, - 212, - 199, - 31, - 51, - 123, - 24, - 209, - 71, - 253, - 154, - 108, - 68, - 173, - 19, - 12, - 194, - 74, - 180, - 47, - 190, - 197, - 49, - 125, - 155, - 26, - 44, - 164, - 124, - 169, - 185, - 59, - 158, - 195, - 109, - 121, - 142, - 253, - 124, - 218, - 167, - 57, - 36, - 22, - 48, - 203, - 70, - 0, - 85, - 97, - 141, - 181, - 255, - 36, - 174, - 228, - 210, - 54, - 146, - 27, - 111, - 66, - 114, - 16, - 17, - 97, - 19, - 113, - 21, - 163, - 180, - 196, - 166, - 95, - 134, - 119, - 177, - 36, - 192, - 28, - 0, - 0, - 0, - 49, - 65, - 111, - 6, - 45, - 144, - 62, - 129, - 207, - 96, - 0, - 0, - 0, - 50, - 49, - 75, - 253, - 9, - 79, - 72, - 192, - 0, - 0, - 0, - 53, - 137, - 1, - 0, - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 66, - 0, - 0, - 0, - 54, - 137, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 66, - 0, - 0, - 0, - 57, - 137, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 66 + 1, 0, 1, 72, 4, 0, 4, 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, + 166, 75, 35, 133, 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, + 64, 49, 220, 193, 72, 27, 65, 111, 5, 225, 147, 53, 50, 134, 160, 235, + 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, 40, 14, + 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, 247, 152, 97, + 0, 235, 190, 96, 156, 211, 204, 209, 31, 39, 62, 185, 67, 116, 214, 211, + 162, 247, 133, 108, 95, 16, 57, 220, 72, 119, 198, 163, 52, 24, 138, + 199, 193, 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, + 58, 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216, 0, 112, 142, 127, 207, 104, 235, 171, 108, 135, 50, 38, 134, + 202, 196, 188, 219, 95, 43, 212, 199, 31, 51, 123, 24, 209, 71, 253, + 154, 108, 68, 173, 19, 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, + 44, 164, 124, 169, 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, + 57, 36, 22, 48, 203, 70, 0, 85, 97, 141, 181, 255, 36, 174, 228, 210, + 54, 146, 27, 111, 66, 114, 16, 17, 97, 19, 113, 21, 163, 180, 196, 166, + 95, 134, 119, 177, 36, 192, 28, 0, 0, 0, 49, 65, 111, 6, 45, 144, 62, + 129, 207, 96, 0, 0, 0, 50, 49, 75, 253, 9, 79, 72, 192, 0, 0, 0, 53, + 137, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 54, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 57, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66 ], "state_diffs_hash": "0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0", "aux_commitments": { "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": ["0x0000000000000000000000000000000000000000000000000000000000000003", "0x0000000000000000000000000000000000000000000000000000000000000004", "0x0000000000000000000000000000000000000000000000000000000000000005", "0x0000000000000000000000000000000000000000000000000000000000000006", "0x0000000000000000000000000000000000000000000000000000000000000007", "0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000"], - "blob_commitments": ["0x0000000000000000000000000000000000000000000000000000000000000001", "0x0000000000000000000000000000000000000000000000000000000000000002", "0x0000000000000000000000000000000000000000000000000000000000000003", "0x0000000000000000000000000000000000000000000000000000000000000004", 
"0x0000000000000000000000000000000000000000000000000000000000000005", "0x0000000000000000000000000000000000000000000000000000000000000006", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000"] + "blob_linear_hashes": [ + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000007", + "0x0000000000000000000000000000000000000000000000000000000000000008", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] } }, "hashes": { diff --git a/core/lib/types/src/event/mod.rs b/core/lib/types/src/event/mod.rs index 055b41d77c7d..81e796097249 100644 --- a/core/lib/types/src/event/mod.rs +++ b/core/lib/types/src/event/mod.rs @@ -58,6 +58,7 @@ impl From<&VmEvent> for Log { transaction_log_index: None, log_type: None, removed: Some(false), + block_timestamp: None, } 
} } diff --git a/core/lib/types/src/fee_model.rs b/core/lib/types/src/fee_model.rs index 9c2cc4d2aaf8..38d785113e5f 100644 --- a/core/lib/types/src/fee_model.rs +++ b/core/lib/types/src/fee_model.rs @@ -1,3 +1,6 @@ +use std::num::NonZeroU64; + +use bigdecimal::{BigDecimal, ToPrimitive}; use serde::{Deserialize, Serialize}; use zksync_config::configs::chain::{FeeModelVersion, StateKeeperConfig}; use zksync_system_constants::L1_GAS_PER_PUBDATA_BYTE; @@ -236,9 +239,86 @@ pub struct FeeParamsV1 { #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub struct FeeParamsV2 { - pub config: FeeModelConfigV2, - pub l1_gas_price: u64, - pub l1_pubdata_price: u64, + config: FeeModelConfigV2, + l1_gas_price: u64, + l1_pubdata_price: u64, + conversion_ratio: BaseTokenConversionRatio, +} + +impl FeeParamsV2 { + pub fn new( + config: FeeModelConfigV2, + l1_gas_price: u64, + l1_pubdata_price: u64, + conversion_ratio: BaseTokenConversionRatio, + ) -> Self { + Self { + config, + l1_gas_price, + l1_pubdata_price, + conversion_ratio, + } + } + + /// Returns the fee model config with the minimal L2 gas price denominated in the chain's base token (WEI or equivalent). + pub fn config(&self) -> FeeModelConfigV2 { + FeeModelConfigV2 { + minimal_l2_gas_price: self.convert_to_base_token(self.config.minimal_l2_gas_price), + ..self.config + } + } + + /// Returns the l1 gas price denominated in the chain's base token (WEI or equivalent). + pub fn l1_gas_price(&self) -> u64 { + self.convert_to_base_token(self.l1_gas_price) + } + + /// Returns the l1 pubdata price denominated in the chain's base token (WEI or equivalent). + pub fn l1_pubdata_price(&self) -> u64 { + self.convert_to_base_token(self.l1_pubdata_price) + } + + /// Converts the fee param to the base token. + fn convert_to_base_token(&self, price_in_wei: u64) -> u64 { + let conversion_ratio = BigDecimal::from(self.conversion_ratio.numerator.get()) + / BigDecimal::from(self.conversion_ratio.denominator.get()); + let converted_price_bd = BigDecimal::from(price_in_wei) * conversion_ratio; + + // Match on the converted price to ensure it can be represented as a u64 + match converted_price_bd.to_u64() { + Some(converted_price) => converted_price, + None => { + if converted_price_bd > BigDecimal::from(u64::MAX) { + tracing::warn!( + "Conversion to base token price failed: converted price is too large: {}. Using u64::MAX instead.", + converted_price_bd + ); + } else { + panic!( + "Conversion to base token price failed: converted price is not a valid u64: {}", + converted_price_bd + ); + } + u64::MAX + } + } + } +} + +/// The struct that represents the BaseToken<->ETH conversion ratio. 
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+pub struct BaseTokenConversionRatio {
+    pub numerator: NonZeroU64,
+    pub denominator: NonZeroU64,
+}
+
+impl Default for BaseTokenConversionRatio {
+    fn default() -> Self {
+        Self {
+            numerator: NonZeroU64::new(1).unwrap(),
+            denominator: NonZeroU64::new(1).unwrap(),
+        }
+    }
 }
 
 #[derive(Debug, Clone, Copy, Serialize, Deserialize)]
diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs
index 3c3a96c297d7..105d43aa6c6c 100644
--- a/core/lib/types/src/lib.rs
+++ b/core/lib/types/src/lib.rs
@@ -57,6 +57,7 @@ pub mod vm_trace;
 pub mod zk_evm_types;
 
 pub mod api;
+pub mod base_token_ratio;
 pub mod eth_sender;
 pub mod helpers;
 pub mod proto;
diff --git a/core/lib/types/src/proto/mod.proto b/core/lib/types/src/proto/mod.proto
index 163215bb1237..1e2ae0ede515 100644
--- a/core/lib/types/src/proto/mod.proto
+++ b/core/lib/types/src/proto/mod.proto
@@ -7,8 +7,12 @@ message SnapshotStorageLogsChunk {
 }
 
 message SnapshotStorageLog {
-  optional bytes account_address = 1; // required; H160
-  optional bytes storage_key = 2; // required; H256
+  // `account_address` and `storage_key` fields are obsolete and are not used in the new snapshot format;
+  // `hashed_key` is used instead. The fields are retained for now to support recovery from old snapshots.
+  optional bytes account_address = 1; // optional; H160
+  optional bytes storage_key = 2; // optional; H256
+  optional bytes hashed_key = 6; // optional; H256
+
   optional bytes storage_value = 3; // required; H256
   optional uint32 l1_batch_number_of_initial_write = 4; // required
   optional uint64 enumeration_index = 5; // required
diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs
index c0d7267ebfae..674996260204 100644
--- a/core/lib/types/src/protocol_upgrade.rs
+++ b/core/lib/types/src/protocol_upgrade.rs
@@ -10,7 +10,7 @@ use zksync_basic_types::{
 };
 use zksync_contracts::{
     BaseSystemContractsHashes, ADMIN_EXECUTE_UPGRADE_FUNCTION,
-    ADMIN_UPGRADE_CHAIN_FROM_VERSION_FUNCTION,
+    ADMIN_UPGRADE_CHAIN_FROM_VERSION_FUNCTION, DIAMOND_CUT,
 };
 use zksync_utils::h256_to_u256;
 
@@ -28,10 +28,6 @@ pub struct Call {
     pub value: U256,
     /// The calldata to be executed on the `target` address.
     pub data: Vec<u8>,
-    /// Hash of the corresponding Ethereum transaction. Size should be 32 bytes.
-    pub eth_hash: H256,
-    /// Block in which Ethereum transaction was included.
-    pub eth_block: u64,
 }
 
 impl std::fmt::Debug for Call {
@@ -40,8 +36,6 @@ impl std::fmt::Debug for Call {
             .field("target", &self.target)
             .field("value", &self.value)
             .field("data", &hex::encode(&self.data))
-            .field("eth_hash", &self.eth_hash)
-            .field("eth_block", &self.eth_block)
             .finish()
     }
 }
@@ -99,8 +93,17 @@ impl From<abi::VerifierParams> for VerifierParams {
 }
 
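The `FeeParamsV2` accessors added in `fee_model.rs` above all funnel through `convert_to_base_token`, which rescales a wei-denominated price by `numerator / denominator`. A minimal standalone sketch of that arithmetic (illustrative only; it assumes just the `bigdecimal` crate and omits the saturating `u64::MAX` branch of the real method):

```rust
use std::num::NonZeroU64;

use bigdecimal::{BigDecimal, ToPrimitive};

/// Rescales `price_in_wei` by `numerator / denominator`, as `convert_to_base_token` does.
/// Returns `None` if the result does not fit into a `u64`.
fn convert(price_in_wei: u64, numerator: NonZeroU64, denominator: NonZeroU64) -> Option<u64> {
    let ratio = BigDecimal::from(numerator.get()) / BigDecimal::from(denominator.get());
    (BigDecimal::from(price_in_wei) * ratio).to_u64()
}

fn main() {
    // A base token worth half as much as ETH doubles every wei-denominated price.
    let (num, denom) = (NonZeroU64::new(2).unwrap(), NonZeroU64::new(1).unwrap());
    assert_eq!(convert(1_000, num, denom), Some(2_000));
}
```

Using `NonZeroU64` for both sides of the ratio makes a division by zero unrepresentable rather than merely unlikely.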
 impl ProtocolUpgrade {
+    pub fn try_from_diamond_cut(diamond_cut_data: &[u8]) -> anyhow::Result<Self> {
+        // Unwraps are safe because we have validated the input against the function signature.
+        let diamond_cut_tokens = DIAMOND_CUT.decode_input(diamond_cut_data)?[0]
+            .clone()
+            .into_tuple()
+            .unwrap();
+        Self::try_from_init_calldata(&diamond_cut_tokens[2].clone().into_bytes().unwrap())
+    }
+
     /// `l1-contracts/contracts/state-transition/libraries/diamond.sol:DiamondCutData.initCalldata`
-    fn try_from_init_calldata(init_calldata: &[u8], eth_block: u64) -> anyhow::Result<Self> {
+    fn try_from_init_calldata(init_calldata: &[u8]) -> anyhow::Result<Self> {
         let upgrade = ethabi::decode(
             &[abi::ProposedUpgrade::schema()],
             init_calldata.get(4..).context("need >= 4 bytes")?,
@@ -124,7 +127,7 @@ impl ProtocolUpgrade {
         Transaction::try_from(abi::Transaction::L1 {
             tx: upgrade.l2_protocol_upgrade_tx,
             factory_deps: upgrade.factory_deps,
-            eth_block,
+            eth_block: 0,
         })
         .context("Transaction::try_from()")?
         .try_into()
@@ -148,10 +151,7 @@ pub fn decode_set_chain_id_event(
         protocol_version,
         Transaction::try_from(abi::Transaction::L1 {
             tx: tx.into(),
-            eth_block: event
-                .block_number
-                .expect("Event block number is missing")
-                .as_u64(),
+            eth_block: 0,
             factory_deps: vec![],
         })
         .unwrap()
@@ -199,7 +199,6 @@ impl TryFrom<Call> for ProtocolUpgrade {
             ProtocolUpgrade::try_from_init_calldata(
                 // Unwrap is safe because we have validated the input against the function signature.
                 &diamond_cut_tokens[2].clone().into_bytes().unwrap(),
-                call.eth_block,
             )
             .context("ProtocolUpgrade::try_from_init_calldata()")
         }
@@ -226,14 +225,6 @@ impl TryFrom<Log> for GovernanceOperation {
         // Extract `GovernanceOperation` data.
         let mut decoded_governance_operation = decoded.remove(1).into_tuple().unwrap();
 
-        let eth_hash = event
-            .transaction_hash
-            .expect("Event transaction hash is missing");
-        let eth_block = event
-            .block_number
-            .expect("Event block number is missing")
-            .as_u64();
-
         let calls = decoded_governance_operation.remove(0).into_array().unwrap();
         let predecessor = H256::from_slice(
             &decoded_governance_operation
@@ -260,8 +251,6 @@ impl TryFrom<Log> for GovernanceOperation {
                     .unwrap(),
                 value: decoded_governance_operation.remove(0).into_uint().unwrap(),
                 data: decoded_governance_operation.remove(0).into_bytes().unwrap(),
-                eth_hash,
-                eth_block,
             }
         })
         .collect();
@@ -486,6 +475,7 @@ mod tests {
             transaction_log_index: Default::default(),
             log_type: Default::default(),
             removed: Default::default(),
+            block_timestamp: Default::default(),
         };
         let decoded_op: GovernanceOperation = correct_log.clone().try_into().unwrap();
         assert_eq!(decoded_op.calls.len(), 1);
diff --git a/core/lib/types/src/pubdata_da.rs b/core/lib/types/src/pubdata_da.rs
index 8f7d3a96f55e..6705fdc29530 100644
--- a/core/lib/types/src/pubdata_da.rs
+++ b/core/lib/types/src/pubdata_da.rs
@@ -1,5 +1,7 @@
+use chrono::{DateTime, Utc};
 use num_enum::TryFromPrimitive;
 use serde::{Deserialize, Serialize};
+use zksync_basic_types::L1BatchNumber;
 use zksync_config::configs::eth_sender::PubdataSendingMode;
 
 /// Enum holding the current values used for DA Layers.
@@ -7,8 +9,12 @@ use zksync_config::configs::eth_sender::PubdataSendingMode;
 #[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)]
 #[derive(TryFromPrimitive)]
 pub enum PubdataDA {
+    /// Pubdata is sent to L1 as tx calldata.
     Calldata = 0,
+    /// Pubdata is sent to L1 as EIP-4844 blobs.
     Blobs,
+    /// Pubdata is sent to external storage (GCS/DA layers) or not sent at all.
+    Custom,
 }
 
 impl From<PubdataSendingMode> for PubdataDA {
@@ -16,6 +22,16 @@ impl From<PubdataSendingMode> for PubdataDA {
         match value {
             PubdataSendingMode::Calldata => PubdataDA::Calldata,
             PubdataSendingMode::Blobs => PubdataDA::Blobs,
+            PubdataSendingMode::Custom => PubdataDA::Custom,
         }
     }
 }
+
+/// Represents a blob in the data availability layer.
+#[derive(Debug, Clone)]
+pub struct DataAvailabilityBlob {
+    pub l1_batch_number: L1BatchNumber,
+    pub blob_id: String,
+    pub inclusion_data: Option<Vec<u8>>,
+    pub sent_at: DateTime<Utc>,
+}
diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs
index 6e4f734a33c1..a29e5a91bf1e 100644
--- a/core/lib/types/src/snapshots.rs
+++ b/core/lib/types/src/snapshots.rs
@@ -25,6 +25,9 @@ pub struct AllSnapshots {
 pub enum SnapshotVersion {
     /// Initial snapshot version. Keys in storage logs are stored as `(address, key)` pairs.
     Version0 = 0,
+    /// Snapshot version made compatible with L1 recovery. Differs from `Version0` by including
+    /// hashed keys in storage logs instead of `(address, key)` pairs.
+    Version1 = 1,
 }
 
 /// Storage snapshot metadata. Used in DAL to fetch certain snapshot data.
@@ -79,18 +82,33 @@ pub struct SnapshotStorageLogsStorageKey {
 }
 
 #[derive(Debug, Clone, PartialEq)]
-pub struct SnapshotStorageLogsChunk {
-    pub storage_logs: Vec<SnapshotStorageLog>,
+pub struct SnapshotStorageLogsChunk<K = H256> {
+    pub storage_logs: Vec<SnapshotStorageLog<K>>,
 }
 
+/// Storage log record in a storage snapshot.
+///
+/// Version 0 and version 1 snapshots differ in the key type; version 0 uses full [`StorageKey`]s (i.e., storage key preimages),
+/// and version 1 uses [`H256`] hashed keys. See [`SnapshotVersion`] for details.
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
-pub struct SnapshotStorageLog {
-    pub key: StorageKey,
+pub struct SnapshotStorageLog<K = H256> {
+    pub key: K,
     pub value: StorageValue,
     pub l1_batch_number_of_initial_write: L1BatchNumber,
     pub enumeration_index: u64,
 }
 
+impl SnapshotStorageLog<StorageKey> {
+    pub fn drop_key_preimage(self) -> SnapshotStorageLog {
+        SnapshotStorageLog {
+            key: self.key.hashed_key(),
+            value: self.value,
+            l1_batch_number_of_initial_write: self.l1_batch_number_of_initial_write,
+            enumeration_index: self.enumeration_index,
+        }
+    }
+}
+
 #[derive(Debug, PartialEq)]
 pub struct SnapshotFactoryDependencies {
     pub factory_deps: Vec<SnapshotFactoryDependency>,
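`drop_key_preimage` above is the bridge between the two snapshot formats: a version-0 log keyed by a full `StorageKey` becomes a version-1 log keyed by its `hashed_key()`. A sketch of that downgrade-to-hash path (import paths and field values are assumptions for illustration):

```rust
use zksync_types::{
    snapshots::SnapshotStorageLog, AccountTreeId, Address, L1BatchNumber, StorageKey, H256,
};

fn to_version1_log() -> SnapshotStorageLog {
    let full_key = StorageKey::new(AccountTreeId::new(Address::zero()), H256::zero());
    let v0_log = SnapshotStorageLog {
        key: full_key,
        value: H256::repeat_byte(1),
        l1_batch_number_of_initial_write: L1BatchNumber(1),
        enumeration_index: 1,
    };
    // The result is keyed by `H256` (the default `K`), matching `SnapshotVersion::Version1`.
    v0_log.drop_key_preimage()
}
```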
@@ -144,17 +162,58 @@ impl ProtoFmt for SnapshotStorageLog {
     type Proto = crate::proto::SnapshotStorageLog;
 
     fn read(r: &Self::Proto) -> anyhow::Result<Self> {
+        let hashed_key = if let Some(hashed_key) = &r.hashed_key {
+            <[u8; 32]>::try_from(hashed_key.as_slice())
+                .context("hashed_key")?
+                .into()
+        } else {
+            let address = required(&r.account_address)
+                .and_then(|bytes| Ok(<[u8; 20]>::try_from(bytes.as_slice())?.into()))
+                .context("account_address")?;
+            let key = required(&r.storage_key)
+                .and_then(|bytes| Ok(<[u8; 32]>::try_from(bytes.as_slice())?.into()))
+                .context("storage_key")?;
+            StorageKey::new(AccountTreeId::new(address), key).hashed_key()
+        };
+
         Ok(Self {
-            key: StorageKey::new(
-                AccountTreeId::new(
-                    required(&r.account_address)
-                        .and_then(|bytes| Ok(<[u8; 20]>::try_from(bytes.as_slice())?.into()))
-                        .context("account_address")?,
-                ),
-                required(&r.storage_key)
-                    .and_then(|bytes| Ok(<[u8; 32]>::try_from(bytes.as_slice())?.into()))
-                    .context("storage_key")?,
+            key: hashed_key,
+            value: required(&r.storage_value)
+                .and_then(|bytes| Ok(<[u8; 32]>::try_from(bytes.as_slice())?.into()))
+                .context("storage_value")?,
+            l1_batch_number_of_initial_write: L1BatchNumber(
+                *required(&r.l1_batch_number_of_initial_write)
+                    .context("l1_batch_number_of_initial_write")?,
             ),
+            enumeration_index: *required(&r.enumeration_index).context("enumeration_index")?,
+        })
+    }
+
+    fn build(&self) -> Self::Proto {
+        Self::Proto {
+            account_address: None,
+            storage_key: None,
+            hashed_key: Some(self.key.as_bytes().to_vec()),
+            storage_value: Some(self.value.as_bytes().to_vec()),
+            l1_batch_number_of_initial_write: Some(self.l1_batch_number_of_initial_write.0),
+            enumeration_index: Some(self.enumeration_index),
+        }
+    }
+}
+
+impl ProtoFmt for SnapshotStorageLog<StorageKey> {
+    type Proto = crate::proto::SnapshotStorageLog;
+
+    fn read(r: &Self::Proto) -> anyhow::Result<Self> {
+        let address = required(&r.account_address)
+            .and_then(|bytes| Ok(<[u8; 20]>::try_from(bytes.as_slice())?.into()))
+            .context("account_address")?;
+        let key = required(&r.storage_key)
+            .and_then(|bytes| Ok(<[u8; 32]>::try_from(bytes.as_slice())?.into()))
+            .context("storage_key")?;
+
+        Ok(Self {
+            key: StorageKey::new(AccountTreeId::new(address), key),
             value: required(&r.storage_value)
                 .and_then(|bytes| Ok(<[u8; 32]>::try_from(bytes.as_slice())?.into()))
                 .context("storage_value")?,
@@ -168,23 +227,27 @@ impl ProtoFmt for SnapshotStorageLog {
 
     fn build(&self) -> Self::Proto {
         Self::Proto {
-            account_address: Some(self.key.address().as_bytes().into()),
-            storage_key: Some(self.key.key().as_bytes().into()),
-            storage_value: Some(self.value.as_bytes().into()),
+            account_address: Some(self.key.address().as_bytes().to_vec()),
+            storage_key: Some(self.key.key().as_bytes().to_vec()),
+            hashed_key: None,
+            storage_value: Some(self.value.as_bytes().to_vec()),
             l1_batch_number_of_initial_write: Some(self.l1_batch_number_of_initial_write.0),
             enumeration_index: Some(self.enumeration_index),
         }
     }
 }
 
-impl ProtoFmt for SnapshotStorageLogsChunk {
+impl<K> ProtoFmt for SnapshotStorageLogsChunk<K>
+where
+    SnapshotStorageLog<K>: ProtoFmt<Proto = crate::proto::SnapshotStorageLog>,
+{
     type Proto = crate::proto::SnapshotStorageLogsChunk;
 
     fn read(r: &Self::Proto) -> anyhow::Result<Self> {
         let mut storage_logs = Vec::with_capacity(r.storage_logs.len());
         for (i, storage_log) in r.storage_logs.iter().enumerate() {
             storage_logs.push(
-                SnapshotStorageLog::read(storage_log)
+                SnapshotStorageLog::<K>::read(storage_log)
                     .with_context(|| format!("storage_log[{i}]"))?,
             )
         }
@@ -196,7 +259,7 @@ impl ProtoFmt for SnapshotStorageLogsChunk {
             storage_logs: self
                 .storage_logs
                 .iter()
-                .map(SnapshotStorageLog::<K>::build)
+                .map(SnapshotStorageLog::<K>::build)
                 .collect(),
         }
     }
diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs
index 510ec5b19d12..a30a57bffa51 100644
---
 a/core/lib/types/src/storage/mod.rs
+++ b/core/lib/types/src/storage/mod.rs
@@ -1,18 +1,18 @@
 use core::fmt::Debug;
 
 use blake2::{Blake2s256, Digest};
+pub use log::*;
 use serde::{Deserialize, Serialize};
 use zksync_basic_types::{web3::keccak256, L2ChainId};
+pub use zksync_system_constants::*;
+use zksync_utils::address_to_h256;
 
 use crate::{AccountTreeId, Address, H160, H256, U256};
 
 pub mod log;
+pub mod witness_block_state;
 pub mod writes;
 
-pub use log::*;
-pub use zksync_system_constants::*;
-use zksync_utils::address_to_h256;
-
 /// Typed fully qualified key of the storage slot in global state tree.
 #[derive(Debug, Copy, Clone, Hash, Eq, PartialEq, Ord, PartialOrd)]
 #[derive(Serialize, Deserialize)]
diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs
index 63ee1ba1c566..bce9cc9034d7 100644
--- a/core/lib/types/src/storage/witness_block_state.rs
+++ b/core/lib/types/src/storage/witness_block_state.rs
@@ -5,8 +5,43 @@ use serde::{Deserialize, Serialize};
 use crate::{StorageKey, StorageValue};
 
 /// Storage data used during Witness Generation.
-#[derive(Debug, Default, Serialize, Deserialize)]
-pub struct WitnessBlockState {
+#[derive(Debug, Default, Clone)]
+pub struct WitnessStorageState {
     pub read_storage_key: HashMap<StorageKey, StorageValue>,
     pub is_write_initial: HashMap<StorageKey, bool>,
 }
+
+/// A serde schema for serializing/deserializing `WitnessStorageState`
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+struct WitnessStorageStateSerde {
+    pub read_storage_key: Vec<(StorageKey, StorageValue)>,
+    pub is_write_initial: Vec<(StorageKey, bool)>,
+}
+
+impl Serialize for WitnessStorageState {
+    fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+        WitnessStorageStateSerde {
+            read_storage_key: self
+                .read_storage_key
+                .iter()
+                .map(|(k, v)| (*k, *v))
+                .collect(),
+            is_write_initial: self
+                .is_write_initial
+                .iter()
+                .map(|(k, v)| (*k, *v))
+                .collect(),
+        }
+        .serialize(s)
+    }
+}
+
+impl<'de> serde::Deserialize<'de> for WitnessStorageState {
+    fn deserialize<D: serde::Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
+        let x = WitnessStorageStateSerde::deserialize(d)?;
+        Ok(Self {
+            read_storage_key: x.read_storage_key.into_iter().collect(),
+            is_write_initial: x.is_write_initial.into_iter().collect(),
+        })
+    }
+}
diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs
index a59b21409cd1..887dfcbff378 100644
--- a/core/lib/types/src/transaction_request.rs
+++ b/core/lib/types/src/transaction_request.rs
@@ -400,7 +400,9 @@ impl TransactionRequest {
     }
 
     // returns packed eth signature if it is present
-    fn get_packed_signature(&self) -> Result<PackedEthSignature, SerializationTransactionError> {
+    pub fn get_packed_signature(
+        &self,
+    ) -> Result<PackedEthSignature, SerializationTransactionError> {
         let packed_v = self
             .v
             .ok_or(SerializationTransactionError::IncompleteSignature)?
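`WitnessStorageState` above trades the derived serde impls for manual ones that go through `Vec<(K, V)>`: map-based formats such as JSON only accept string keys, and `StorageKey` is a composite struct, so the maps are flattened into pair lists on the wire. A round-trip sketch (crate paths and the use of `serde_json` are assumptions):

```rust
use zksync_types::{
    storage::witness_block_state::WitnessStorageState, AccountTreeId, Address, StorageKey, H256,
};

fn roundtrip() -> anyhow::Result<()> {
    let mut state = WitnessStorageState::default();
    let key = StorageKey::new(AccountTreeId::new(Address::zero()), H256::zero());
    state.read_storage_key.insert(key, H256::repeat_byte(1));
    state.is_write_initial.insert(key, true);

    // Serializes as vectors of `(key, value)` pairs, so JSON accepts the non-string keys.
    let json = serde_json::to_string(&state)?;
    let restored: WitnessStorageState = serde_json::from_str(&json)?;
    assert_eq!(restored.read_storage_key.len(), 1);
    Ok(())
}
```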
diff --git a/core/lib/utils/Cargo.toml b/core/lib/utils/Cargo.toml index 4eea7d1398d1..5ec27380df5b 100644 --- a/core/lib/utils/Cargo.toml +++ b/core/lib/utils/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_utils" -version = "0.1.0" +description = "ZKsync utilities" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -12,7 +13,7 @@ categories.workspace = true [dependencies] zksync_basic_types.workspace = true zk_evm.workspace = true -vlog.workspace = true +zksync_vlog.workspace = true bigdecimal.workspace = true num = { workspace = true, features = ["serde"] } diff --git a/core/lib/utils/src/wait_for_tasks.rs b/core/lib/utils/src/wait_for_tasks.rs index 2fa59280e99e..ab548bdd1dde 100644 --- a/core/lib/utils/src/wait_for_tasks.rs +++ b/core/lib/utils/src/wait_for_tasks.rs @@ -47,14 +47,14 @@ impl ManagedTasks { let err = "One of the actors finished its run, while it wasn't expected to do it"; tracing::error!("{err}"); - vlog::capture_message(err, vlog::AlertLevel::Warning); + zksync_vlog::capture_message(err, zksync_vlog::AlertLevel::Warning); } } Ok(Err(err)) => { let err = format!("One of the tokio actors unexpectedly finished with error: {err:#}"); tracing::error!("{err}"); - vlog::capture_message(&err, vlog::AlertLevel::Warning); + zksync_vlog::capture_message(&err, zksync_vlog::AlertLevel::Warning); } Err(error) => { let panic_message = try_extract_panic_message(error); diff --git a/core/lib/vlog/Cargo.toml b/core/lib/vlog/Cargo.toml index 8efefb158021..eb1ed735519c 100644 --- a/core/lib/vlog/Cargo.toml +++ b/core/lib/vlog/Cargo.toml @@ -1,6 +1,7 @@ [package] -name = "vlog" -version = "0.1.0" +name = "zksync_vlog" +description = "ZKsync observability stack" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -8,10 +9,11 @@ repository.workspace = true license.workspace = true keywords.workspace = true categories.workspace = true -publish = false [dependencies] +anyhow.workspace = true chrono.workspace = true +tokio.workspace = true tracing.workspace = true tracing-subscriber = { workspace = true, features = [ "fmt", @@ -29,3 +31,5 @@ opentelemetry-otlp = { workspace = true, features = [ "reqwest-client", ] } opentelemetry-semantic-conventions.workspace = true +vise.workspace = true +vise-exporter.workspace = true diff --git a/core/lib/vlog/src/lib.rs b/core/lib/vlog/src/lib.rs index a65a11f3c479..9b2886ba81d5 100644 --- a/core/lib/vlog/src/lib.rs +++ b/core/lib/vlog/src/lib.rs @@ -1,4 +1,4 @@ -//! This module contains the observability subsystem. +//! This crate contains the observability subsystem. //! It is responsible for providing a centralized interface for consistent observability configuration. use std::{backtrace::Backtrace, borrow::Cow, panic::PanicInfo, str::FromStr}; @@ -28,6 +28,8 @@ use tracing_subscriber::{ EnvFilter, Layer, }; +pub mod prometheus; + type TracingLayer = Layered, EnvFilter, Inner>, Inner>; @@ -139,6 +141,7 @@ pub struct OpenTelemetryOptions { /// Currently capable of configuring logging output and sentry integration. #[derive(Debug, Default)] pub struct ObservabilityBuilder { + disable_default_logs: bool, log_format: LogFormat, log_directives: Option, sentry_url: Option, @@ -176,6 +179,14 @@ impl ObservabilityBuilder { self } + /// Disables logs enabled by default. + /// May be used, for example, in interactive CLI applications, where the user may want to fully control + /// the verbosity. 
+    pub fn disable_default_logs(mut self) -> Self {
+        self.disable_default_logs = true;
+        self
+    }
+
     /// Enables Sentry integration.
     /// Returns an error if the provided Sentry URL is invalid.
     pub fn with_sentry_url(
@@ -254,15 +265,36 @@ impl ObservabilityBuilder {
         subscriber.with(layer)
     }
 
+    /// Builds a filter for the logs.
+    ///
+    /// Unless `disable_default_logs` was set, uses `zksync=info` as a default, which is then merged
+    /// with user-defined directives. Provided directives can extend/override the default value.
+    ///
+    /// The provided default covers all the crates with a name starting with `zksync` (per `tracing`
+    /// [documentation][1]), which is a good enough default for any project.
+    ///
+    /// If `log_directives` are provided via `with_log_directives`, they will be used.
+    /// Otherwise, the value will be parsed from the environment variable `RUST_LOG`.
+    ///
+    /// [1]: https://docs.rs/tracing-subscriber/0.3.18/tracing_subscriber/filter/targets/struct.Targets.html#filtering-with-targets
+    fn build_filter(&self) -> EnvFilter {
+        let mut directives = if self.disable_default_logs {
+            "".to_string()
+        } else {
+            "zksync=info,".to_string()
+        };
+        if let Some(log_directives) = &self.log_directives {
+            directives.push_str(log_directives);
+        } else if let Ok(env_directives) = std::env::var(EnvFilter::DEFAULT_ENV) {
+            directives.push_str(&env_directives);
+        };
+        EnvFilter::new(directives)
+    }
+
     /// Initializes the observability subsystem.
     pub fn build(self) -> ObservabilityGuard {
         // Initialize logs.
-
-        let env_filter = if let Some(log_directives) = self.log_directives {
-            tracing_subscriber::EnvFilter::new(log_directives)
-        } else {
-            tracing_subscriber::EnvFilter::from_default_env()
-        };
+        let env_filter = self.build_filter();
 
         match self.log_format {
             LogFormat::Plain => {
diff --git a/core/lib/vlog/src/prometheus.rs b/core/lib/vlog/src/prometheus.rs
new file mode 100644
index 000000000000..14db8fa418d7
--- /dev/null
+++ b/core/lib/vlog/src/prometheus.rs
@@ -0,0 +1,73 @@
+//! Prometheus-related functionality, such as [`PrometheusExporterConfig`].
+
+use std::{net::Ipv4Addr, time::Duration};
+
+use anyhow::Context as _;
+use tokio::sync::watch;
+use vise::MetricsCollection;
+use vise_exporter::MetricsExporter;
+
+#[derive(Debug)]
+enum PrometheusTransport {
+    Pull {
+        port: u16,
+    },
+    Push {
+        gateway_uri: String,
+        interval: Duration,
+    },
+}
+
+/// Configuration of a Prometheus exporter.
+#[derive(Debug)]
+pub struct PrometheusExporterConfig {
+    transport: PrometheusTransport,
+}
+
+impl PrometheusExporterConfig {
+    /// Creates an exporter that will run an HTTP server on the specified `port`.
+    pub const fn pull(port: u16) -> Self {
+        Self {
+            transport: PrometheusTransport::Pull { port },
+        }
+    }
+
+    /// Creates an exporter that will push metrics to the specified Prometheus gateway endpoint.
+    pub const fn push(gateway_uri: String, interval: Duration) -> Self {
+        Self {
+            transport: PrometheusTransport::Push {
+                gateway_uri,
+                interval,
+            },
+        }
+    }
+
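Because the `build_filter` method added to `ObservabilityBuilder` above simply concatenates directive strings, the default and the user-supplied directives compose textually before `EnvFilter` parses them. A toy mirror of that merge (hypothetical helper, not part of the diff):

```rust
use tracing_subscriber::EnvFilter;

/// Mirrors `build_filter` for an explicit directive string instead of `RUST_LOG`.
fn merged_filter(disable_defaults: bool, user_directives: &str) -> EnvFilter {
    let mut directives = if disable_defaults {
        String::new()
    } else {
        // Covers every target starting with `zksync` at INFO level...
        "zksync=info,".to_owned()
    };
    // ...while later, more specific directives (e.g. `zksync_dal=debug`) take precedence.
    directives.push_str(user_directives);
    EnvFilter::new(directives)
}
```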
+    /// Runs the exporter. This future should be spawned in a separate Tokio task.
+    pub async fn run(self, mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
+        let registry = MetricsCollection::lazy().collect();
+        let metrics_exporter =
+            MetricsExporter::new(registry.into()).with_graceful_shutdown(async move {
+                stop_receiver.changed().await.ok();
+            });
+
+        match self.transport {
+            PrometheusTransport::Pull { port } => {
+                let prom_bind_address = (Ipv4Addr::UNSPECIFIED, port).into();
+                metrics_exporter
+                    .start(prom_bind_address)
+                    .await
+                    .context("Failed starting metrics server")?;
+            }
+            PrometheusTransport::Push {
+                gateway_uri,
+                interval,
+            } => {
+                let endpoint = gateway_uri
+                    .parse()
+                    .context("Failed parsing Prometheus push gateway endpoint")?;
+                metrics_exporter.push_to_gateway(endpoint, interval).await;
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/core/lib/vm_utils/Cargo.toml b/core/lib/vm_utils/Cargo.toml
index 2ae020f44052..c325f0e9db30 100644
--- a/core/lib/vm_utils/Cargo.toml
+++ b/core/lib/vm_utils/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
-name = "vm_utils"
-version = "0.1.0"
+name = "zksync_vm_utils"
+description = "ZKsync VM utilities"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
@@ -10,7 +11,7 @@ keywords.workspace = true
 categories.workspace = true
 
 [dependencies]
-multivm.workspace = true
+zksync_multivm.workspace = true
 zksync_types.workspace = true
 zksync_dal.workspace = true
 zksync_state.workspace = true
diff --git a/core/lib/vm_utils/src/lib.rs b/core/lib/vm_utils/src/lib.rs
index 5526b3f1657b..a3ec715851a4 100644
--- a/core/lib/vm_utils/src/lib.rs
+++ b/core/lib/vm_utils/src/lib.rs
@@ -1,11 +1,11 @@
 use anyhow::{anyhow, Context};
-use multivm::{
+use tokio::runtime::Handle;
+use zksync_dal::{Connection, Core};
+use zksync_multivm::{
     interface::{VmFactory, VmInterface, VmInterfaceHistoryEnabled},
     vm_latest::HistoryEnabled,
     VmInstance,
 };
-use tokio::runtime::Handle;
-use zksync_dal::{Connection, Core};
 use zksync_state::{PostgresStorage, ReadStorage, StoragePtr, StorageView};
 use zksync_types::{L1BatchNumber, L2ChainId, Transaction};
 
@@ -24,8 +24,9 @@ pub fn create_vm(
     mut connection: Connection<'_, Core>,
     l2_chain_id: L2ChainId,
 ) -> anyhow::Result<VmAndStorage> {
-    let l1_batch_params_provider = rt_handle
-        .block_on(L1BatchParamsProvider::new(&mut connection))
+    let mut l1_batch_params_provider = L1BatchParamsProvider::new();
+    rt_handle
+        .block_on(l1_batch_params_provider.initialize(&mut connection))
         .context("failed initializing L1 batch params provider")?;
     let first_l2_block_in_batch = rt_handle
         .block_on(
diff --git a/core/lib/vm_utils/src/storage.rs b/core/lib/vm_utils/src/storage.rs
index f9b6dec23da6..fbf52a67623d 100644
--- a/core/lib/vm_utils/src/storage.rs
+++ b/core/lib/vm_utils/src/storage.rs
@@ -1,13 +1,13 @@
 use std::time::{Duration, Instant};
 
 use anyhow::Context;
-use multivm::{
+use zksync_contracts::BaseSystemContracts;
+use zksync_dal::{Connection, Core, CoreDal, DalError};
+use zksync_multivm::{
     interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode},
     vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
     zk_evm_latest::ethereum_types::H256,
 };
-use zksync_contracts::BaseSystemContracts;
-use zksync_dal::{Connection, Core, CoreDal, DalError};
 use zksync_types::{
     block::L2BlockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address,
     L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, ZKPORTER_IS_AVAILABLE,
@@ -83,18 +83,41 @@ pub fn l1_batch_params(
 
 /// Provider of L1 batch parameters for state keeper I/O implementations. The provider is stateless; i.e., it doesn't
 /// enforce a particular order of method calls.
-#[derive(Debug)]
+#[derive(Debug, Default)]
 pub struct L1BatchParamsProvider {
     snapshot: Option<SnapshotRecoveryStatus>,
 }
 
 impl L1BatchParamsProvider {
-    pub async fn new(storage: &mut Connection<'_, Core>) -> anyhow::Result<Self> {
-        let snapshot = storage
+    pub fn new() -> Self {
+        Self { snapshot: None }
+    }
+
+    /// Performs the provider initialization. Must only be called with initialized storage (i.e.,
+    /// either after genesis or snapshot recovery).
+    pub async fn initialize(&mut self, storage: &mut Connection<'_, Core>) -> anyhow::Result<()> {
+        if storage
+            .blocks_dal()
+            .get_earliest_l1_batch_number()
+            .await?
+            .is_some()
+        {
+            // We have batches in the storage, no need for special treatment.
+            return Ok(());
+        }
+
+        let Some(snapshot) = storage
             .snapshot_recovery_dal()
             .get_applied_snapshot_status()
-            .await?;
-        Ok(Self { snapshot })
+            .await
+            .context("failed getting snapshot recovery status")?
+        else {
+            anyhow::bail!(
+                "Storage is not initialized, it doesn't have batches or snapshot recovery status"
+            )
+        };
+        self.snapshot = Some(snapshot);
+        Ok(())
     }
 
     /// Returns state root hash and timestamp of an L1 batch with the specified number waiting for the hash to be computed
diff --git a/core/lib/web3_decl/Cargo.toml b/core/lib/web3_decl/Cargo.toml
index 86cd0a105252..50073e357eb4 100644
--- a/core/lib/web3_decl/Cargo.toml
+++ b/core/lib/web3_decl/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_web3_decl"
-version = "0.1.0"
+description = "ZKsync Web3 API abstractions and clients"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
@@ -27,6 +28,7 @@ serde_json.workspace = true
 tokio = { workspace = true, features = ["time"] }
 tracing.workspace = true
 vise.workspace = true
+rustls.workspace = true
 
 [dev-dependencies]
 assert_matches.workspace = true
diff --git a/core/lib/web3_decl/src/client/metrics.rs b/core/lib/web3_decl/src/client/metrics.rs
index 01daf76cf07c..0f01bbb49914 100644
--- a/core/lib/web3_decl/src/client/metrics.rs
+++ b/core/lib/web3_decl/src/client/metrics.rs
@@ -167,7 +167,7 @@ impl L2ClientMetrics {
         let status = err
             .downcast_ref::<transport::Error>()
             .and_then(|err| match err {
-                transport::Error::RequestFailure { status_code } => Some(*status_code),
+                transport::Error::Rejected { status_code } => Some(*status_code),
                 _ => None,
             });
         let labels = HttpErrorLabels {
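The split of `L1BatchParamsProvider::new` into an infallible constructor plus `initialize` (in `vm_utils/src/storage.rs` above) lets callers create the provider eagerly and defer the DB round trip. A usage sketch (the `zksync_vm_utils::storage` path follows the crate rename in this diff and is an assumption):

```rust
use zksync_dal::{Connection, Core};
use zksync_vm_utils::storage::L1BatchParamsProvider;

async fn make_provider(
    storage: &mut Connection<'_, Core>,
) -> anyhow::Result<L1BatchParamsProvider> {
    let mut provider = L1BatchParamsProvider::new();
    // Errors out unless the storage has batches or a completed snapshot recovery.
    provider.initialize(storage).await?;
    Ok(provider)
}
```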
diff --git a/core/lib/web3_decl/src/client/mod.rs b/core/lib/web3_decl/src/client/mod.rs
index 80a310e2d440..ca861e77fdfe 100644
--- a/core/lib/web3_decl/src/client/mod.rs
+++ b/core/lib/web3_decl/src/client/mod.rs
@@ -46,6 +46,7 @@ mod boxed;
 mod metrics;
 mod mock;
 mod network;
+mod rustls;
 mod shared;
 #[cfg(test)]
 mod tests;
@@ -140,6 +141,8 @@ impl fmt::Debug for Client {
 
 impl Client {
     /// Creates an HTTP-backed client.
     pub fn http(url: SensitiveUrl) -> anyhow::Result<ClientBuilder<L2>> {
+        crate::client::rustls::set_rustls_backend_if_required();
+
         let client = HttpClientBuilder::default().build(url.expose_str())?;
         Ok(ClientBuilder::new(client, url))
     }
@@ -150,6 +153,8 @@ impl WsClient {
     pub async fn ws(
         url: SensitiveUrl,
     ) -> anyhow::Result<ClientBuilder<L2, ws_client::WsClient>> {
+        crate::client::rustls::set_rustls_backend_if_required();
+
         let client = ws_client::WsClientBuilder::default()
             .build(url.expose_str())
             .await?;
diff --git a/core/lib/web3_decl/src/client/rustls.rs b/core/lib/web3_decl/src/client/rustls.rs
new file mode 100644
index 000000000000..2db9b41dd83c
--- /dev/null
+++ b/core/lib/web3_decl/src/client/rustls.rs
@@ -0,0 +1,10 @@
+/// Makes sure that `rustls` crypto backend is set before we instantiate
+/// a `Web3` client. `jsonrpsee` doesn't explicitly set it, and when
+/// multiple crypto backends are enabled, `rustls` can't choose one and panics.
+/// See [this issue](https://github.com/rustls/rustls/issues/1877) for more detail.
+///
+/// The problem is on `jsonrpsee` side, but until it's fixed we have to patch it.
+pub(super) fn set_rustls_backend_if_required() {
+    // Function returns an error if the provider is already installed, and we're fine with it.
+    let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
+}
diff --git a/core/lib/web3_decl/src/client/tests.rs b/core/lib/web3_decl/src/client/tests.rs
index 2cb677514c79..6ba7ac7d1a33 100644
--- a/core/lib/web3_decl/src/client/tests.rs
+++ b/core/lib/web3_decl/src/client/tests.rs
@@ -198,7 +198,7 @@ async fn wrapping_mock_client() {
             Ok("slow")
         })
         .method("rate_limit", || {
-            let http_err = transport::Error::RequestFailure { status_code: 429 };
+            let http_err = transport::Error::Rejected { status_code: 429 };
             Err::<(), _>(Error::Transport(http_err.into()))
         })
         .method("eth_getBlockNumber", || Ok(U64::from(1)))
diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs
index b0e311d339bc..10443443958b 100644
--- a/core/lib/web3_decl/src/namespaces/eth.rs
+++ b/core/lib/web3_decl/src/namespaces/eth.rs
@@ -2,7 +2,10 @@ use jsonrpsee::core::RpcResult;
 use jsonrpsee::proc_macros::rpc;
 use zksync_types::{
-    api::{BlockId, BlockIdVariant, BlockNumber, Transaction, TransactionVariant},
+    api::{
+        state_override::StateOverride, BlockId, BlockIdVariant, BlockNumber, Transaction,
+        TransactionVariant,
+    },
     transaction_request::CallRequest,
     Address, H256,
 };
@@ -31,10 +34,20 @@ pub trait EthNamespace {
     async fn chain_id(&self) -> RpcResult<U64>;
 
     #[method(name = "call")]
-    async fn call(&self, req: CallRequest, block: Option<BlockIdVariant>) -> RpcResult<Bytes>;
+    async fn call(
+        &self,
+        req: CallRequest,
+        block: Option<BlockIdVariant>,
+        state_override: Option<StateOverride>,
+    ) -> RpcResult<Bytes>;
 
     #[method(name = "estimateGas")]
-    async fn estimate_gas(&self, req: CallRequest, _block: Option<BlockNumber>) -> RpcResult<U256>;
+    async fn estimate_gas(
+        &self,
+        req: CallRequest,
+        _block: Option<BlockNumber>,
+        state_override: Option<StateOverride>,
+    ) -> RpcResult<U256>;
 
     #[method(name = "gasPrice")]
     async fn gas_price(&self) -> RpcResult<U256>;
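The widened `eth_call` signature above is backward compatible from the caller's perspective: `call` and the estimators just grow a trailing `Option<StateOverride>`. A client-side sketch (types and field access here are assumptions; passing `None` preserves the old behavior):

```rust
use zksync_types::{api::state_override::StateOverride, transaction_request::CallRequest};
use zksync_web3_decl::{
    client::{Client, L2},
    namespaces::EthNamespaceClient,
};

async fn call_with_optional_override(
    client: &Client<L2>,
    req: CallRequest,
    state_override: Option<StateOverride>,
) -> anyhow::Result<()> {
    // `None` for the block means "latest"; `None` for the override means "no overrides".
    let return_data = client.call(req, None, state_override).await?;
    println!("returned {} bytes", return_data.0.len());
    Ok(())
}
```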
diff --git a/core/lib/web3_decl/src/namespaces/mod.rs b/core/lib/web3_decl/src/namespaces/mod.rs
index 76445f9a4fd8..f3b5c8a9aaee 100644
--- a/core/lib/web3_decl/src/namespaces/mod.rs
+++ b/core/lib/web3_decl/src/namespaces/mod.rs
@@ -1,13 +1,13 @@
 pub use self::{
     debug::DebugNamespaceClient, en::EnNamespaceClient, eth::EthNamespaceClient,
-    net::NetNamespaceClient, snapshots::SnapshotsNamespaceClient, web3::Web3NamespaceClient,
-    zks::ZksNamespaceClient,
+    net::NetNamespaceClient, snapshots::SnapshotsNamespaceClient,
+    unstable::UnstableNamespaceClient, web3::Web3NamespaceClient, zks::ZksNamespaceClient,
 };
 #[cfg(feature = "server")]
 pub use self::{
     debug::DebugNamespaceServer, en::EnNamespaceServer, eth::EthNamespaceServer,
     eth::EthPubSubServer, net::NetNamespaceServer, snapshots::SnapshotsNamespaceServer,
-    web3::Web3NamespaceServer, zks::ZksNamespaceServer,
+    unstable::UnstableNamespaceServer, web3::Web3NamespaceServer, zks::ZksNamespaceServer,
 };
 
 mod debug;
@@ -15,5 +15,6 @@ mod en;
 mod eth;
 mod net;
 mod snapshots;
+mod unstable;
 mod web3;
 mod zks;
diff --git a/core/lib/web3_decl/src/namespaces/unstable.rs b/core/lib/web3_decl/src/namespaces/unstable.rs
new file mode 100644
index 000000000000..4996813a9855
--- /dev/null
+++ b/core/lib/web3_decl/src/namespaces/unstable.rs
@@ -0,0 +1,23 @@
+#[cfg_attr(not(feature = "server"), allow(unused_imports))]
+use jsonrpsee::core::RpcResult;
+use jsonrpsee::proc_macros::rpc;
+use zksync_types::{api::TransactionExecutionInfo, H256};
+
+use crate::client::{ForNetwork, L2};
+
+/// RPCs in this namespace are experimental; their interface is unstable and WILL change.
+#[cfg_attr(
+    feature = "server",
+    rpc(server, client, namespace = "unstable", client_bounds(Self: ForNetwork<Net = L2>))
+)]
+#[cfg_attr(
+    not(feature = "server"),
+    rpc(client, namespace = "unstable", client_bounds(Self: ForNetwork<Net = L2>))
+)]
+pub trait UnstableNamespace {
+    #[method(name = "getTransactionExecutionInfo")]
+    async fn transaction_execution_info(
+        &self,
+        hash: H256,
+    ) -> RpcResult<Option<TransactionExecutionInfo>>;
+}
diff --git a/core/lib/web3_decl/src/namespaces/zks.rs b/core/lib/web3_decl/src/namespaces/zks.rs
index b6861a9d2dd7..6f443dbded6a 100644
--- a/core/lib/web3_decl/src/namespaces/zks.rs
+++ b/core/lib/web3_decl/src/namespaces/zks.rs
@@ -5,8 +5,8 @@ use jsonrpsee::core::RpcResult;
 use jsonrpsee::proc_macros::rpc;
 use zksync_types::{
     api::{
-        BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, Proof, ProtocolVersion,
-        TransactionDetailedResult, TransactionDetails,
+        state_override::StateOverride, BlockDetails, BridgeAddresses, L1BatchDetails,
+        L2ToL1LogProof, Proof, ProtocolVersion, TransactionDetailedResult, TransactionDetails,
     },
     fee::Fee,
     fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput},
@@ -29,10 +29,18 @@ use crate::{
 )]
 pub trait ZksNamespace {
     #[method(name = "estimateFee")]
-    async fn estimate_fee(&self, req: CallRequest) -> RpcResult<Fee>;
+    async fn estimate_fee(
+        &self,
+        req: CallRequest,
+        state_override: Option<StateOverride>,
+    ) -> RpcResult<Fee>;
 
     #[method(name = "estimateGasL1ToL2")]
-    async fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> RpcResult<U256>;
+    async fn estimate_gas_l1_to_l2(
+        &self,
+        req: CallRequest,
+        state_override: Option<StateOverride>,
+    ) -> RpcResult<U256>;
 
     #[method(name = "getBridgehubContract")]
     async fn get_bridgehub_contract(&self) -> RpcResult<Option<Address>>;
diff --git a/core/lib/zksync_core_leftovers/Cargo.toml b/core/lib/zksync_core_leftovers/Cargo.toml
index c394342c6996..4eab88234749 100644
--- a/core/lib/zksync_core_leftovers/Cargo.toml
+++ b/core/lib/zksync_core_leftovers/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_core_leftovers"
-version = "0.1.0"
+description = "Deprecated package"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
@@ -10,106 +11,14 @@ keywords.workspace = true
 categories.workspace = true
 
 [dependencies]
-vise.workspace = true
-zksync_state.workspace = true
-vm_utils.workspace = true
-zksync_types.workspace = true
 zksync_dal.workspace = true
-prover_dal.workspace = true
-zksync_db_connection.workspace = true
zksync_config.workspace = true +zksync_protobuf.workspace = true zksync_protobuf_config.workspace = true -zksync_utils.workspace = true -zksync_contracts.workspace = true -zksync_system_constants.workspace = true -zksync_eth_client.workspace = true -zksync_eth_signer.workspace = true -zksync_l1_contract_interface.workspace = true -zksync_mempool.workspace = true -zksync_circuit_breaker.workspace = true -zksync_storage.workspace = true -zksync_tee_verifier.workspace = true -zksync_merkle_tree.workspace = true -zksync_mini_merkle_tree.workspace = true -prometheus_exporter.workspace = true -zksync_prover_interface.workspace = true -zksync_queued_job_processor.workspace = true -zksync_web3_decl = { workspace = true, features = ["server"] } -zksync_object_store.workspace = true -zksync_health_check.workspace = true -vlog.workspace = true -zksync_eth_watch.workspace = true -zksync_shared_metrics.workspace = true -zksync_proof_data_handler.workspace = true -zksync_commitment_generator.workspace = true -zksync_house_keeper.workspace = true +zksync_env_config.workspace = true zksync_node_genesis.workspace = true -zksync_eth_sender.workspace = true -zksync_node_fee_model.workspace = true -zksync_state_keeper.workspace = true -zksync_metadata_calculator.workspace = true -zksync_node_sync.workspace = true -zksync_node_consensus.workspace = true -zksync_contract_verification_server.workspace = true -zksync_node_api_server.workspace = true -zksync_tee_verifier_input_producer.workspace = true -multivm.workspace = true - -# Consensus dependenices -zksync_concurrency.workspace = true -zksync_consensus_crypto.workspace = true -zksync_consensus_network.workspace = true -zksync_consensus_roles.workspace = true -zksync_consensus_storage.workspace = true -zksync_consensus_executor.workspace = true -zksync_consensus_bft.workspace = true -zksync_consensus_utils.workspace = true -zksync_protobuf.workspace = true -prost.workspace = true -secrecy.workspace = true -serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true +anyhow.workspace = true +tokio = { workspace = true, features = ["time"] } serde_yaml.workspace = true -itertools.workspace = true ctrlc.workspace = true -rand.workspace = true - -tokio = { workspace = true, features = ["time"] } -futures = { workspace = true, features = ["compat"] } -pin-project-lite.workspace = true -chrono = { workspace = true, features = ["serde"] } -anyhow.workspace = true -thiserror.workspace = true -async-trait.workspace = true -thread_local.workspace = true - -reqwest = { workspace = true, features = ["blocking", "json"] } -hex.workspace = true -lru.workspace = true -governor.workspace = true -tower-http = { workspace = true, features = ["full"] } -tower = { workspace = true, features = ["full"] } -axum = { workspace = true, features = [ - "http1", - "json", - "tokio", -] } -once_cell.workspace = true -dashmap.workspace = true - -tracing.workspace = true - -[dev-dependencies] -zksync_test_account.workspace = true -zksync_node_test_utils.workspace = true - -assert_matches.workspace = true -jsonrpsee.workspace = true -tempfile.workspace = true -test-casing.workspace = true -test-log.workspace = true -backon.workspace = true - -[build-dependencies] -zksync_protobuf_build.workspace = true diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index f0aba52742c1..b79b86d718d0 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -1,111 +1,11 @@ 
#![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] -use std::{ - net::Ipv4Addr, - str::FromStr, - sync::Arc, - time::{Duration, Instant}, -}; +use std::str::FromStr; -use anyhow::Context as _; -use prometheus_exporter::PrometheusExporterConfig; -use prover_dal::Prover; -use tokio::{ - sync::{oneshot, watch}, - task::JoinHandle, -}; -use zksync_circuit_breaker::{ - l1_txs::FailedL1TransactionChecker, replication_lag::ReplicationLagChecker, - CircuitBreakerChecker, CircuitBreakers, -}; -use zksync_commitment_generator::{ - validation_task::L1BatchCommitmentModeValidationTask, CommitmentGenerator, -}; -use zksync_concurrency::{ctx, scope}; -use zksync_config::{ - configs::{ - api::{MerkleTreeApiConfig, Web3JsonRpcConfig}, - chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, - consensus::ConsensusConfig, - database::{MerkleTreeConfig, MerkleTreeMode}, - wallets, - wallets::Wallets, - ContractsConfig, DatabaseSecrets, GeneralConfig, Secrets, - }, - ApiConfig, DBConfig, EthWatchConfig, GenesisConfig, -}; -use zksync_contracts::governance_contract; -use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core, CoreDal}; -use zksync_db_connection::healthcheck::ConnectionPoolHealthCheck; -use zksync_eth_client::{clients::PKSigningClient, BoundEthInterface}; -use zksync_eth_sender::{Aggregator, EthTxAggregator, EthTxManager}; -use zksync_eth_watch::{EthHttpQueryClient, EthWatch}; -use zksync_health_check::{AppHealthCheck, HealthStatus, ReactiveHealthCheck}; -use zksync_house_keeper::{ - blocks_state_reporter::L1BatchMetricsReporter, - periodic_job::PeriodicJob, - prover::{ - FriGpuProverArchiver, FriProofCompressorJobRetryManager, FriProofCompressorQueueReporter, - FriProverJobRetryManager, FriProverJobsArchiver, FriProverQueueReporter, - FriWitnessGeneratorJobRetryManager, FriWitnessGeneratorQueueReporter, - WaitingToQueuedFriWitnessJobMover, - }, -}; -use zksync_metadata_calculator::{ - api_server::TreeApiHttpClient, MetadataCalculator, MetadataCalculatorConfig, -}; -use zksync_node_api_server::{ - healthcheck::HealthCheckHandle, - tx_sender::{build_tx_sender, TxSenderConfig}, - web3::{self, mempool_cache::MempoolCache, state::InternalApiConfig, Namespace}, -}; -use zksync_node_fee_model::{ - l1_gas_price::GasAdjusterSingleton, BatchFeeModelInputProvider, MainNodeFeeInputProvider, -}; -use zksync_node_genesis::{ensure_genesis_state, GenesisParams}; -use zksync_object_store::{ObjectStore, ObjectStoreFactory}; -use zksync_queued_job_processor::JobProcessor; -use zksync_shared_metrics::{InitStage, APP_METRICS}; -use zksync_state::{PostgresStorageCaches, RocksdbStorageOptions}; -use zksync_state_keeper::{ - create_state_keeper, io::seal_logic::l2_block_seal_subtasks::L2BlockSealProcess, - AsyncRocksdbCache, MempoolFetcher, MempoolGuard, OutputHandler, StateKeeperPersistence, - TreeWritesPersistence, -}; -use zksync_tee_verifier_input_producer::TeeVerifierInputProducer; -use zksync_types::{ethabi::Contract, fee_model::FeeModelConfig, Address, L2ChainId}; -use zksync_web3_decl::client::{Client, DynClient, L1}; +use tokio::sync::oneshot; pub mod temp_config_store; -/// Inserts the initial information about ZKsync tokens into the database. 
-pub async fn genesis_init(
-    genesis_config: GenesisConfig,
-    database_secrets: &DatabaseSecrets,
-) -> anyhow::Result<()> {
-    let db_url = database_secrets.master_url()?;
-    let pool = ConnectionPool::<Core>::singleton(db_url)
-        .build()
-        .await
-        .context("failed to build connection_pool")?;
-    let mut storage = pool.connection().await.context("connection()")?;
-
-    let params = GenesisParams::load_genesis_params(genesis_config)?;
-    ensure_genesis_state(&mut storage, &params).await?;
-
-    Ok(())
-}
-
-pub async fn is_genesis_needed(database_secrets: &DatabaseSecrets) -> bool {
-    let db_url = database_secrets.master_url().unwrap();
-    let pool = ConnectionPool::<Core>::singleton(db_url)
-        .build()
-        .await
-        .expect("failed to build connection_pool");
-    let mut storage = pool.connection().await.expect("connection()");
-    storage.blocks_dal().is_genesis_needed().await.unwrap()
-}
-
 /// Sets up an interrupt handler and returns a future that resolves once an interrupt signal
 /// is received.
 pub fn setup_sigint_handler() -> oneshot::Receiver<()> {
@@ -154,8 +54,14 @@ pub enum Component {
     Consensus,
     /// Component generating commitment for L1 batches.
     CommitmentGenerator,
+    /// Component sending pubdata to the DA layers.
+    DADispatcher,
     /// VM runner-based component that saves protective reads to Postgres.
     VmRunnerProtectiveReads,
+    /// A component to fetch and persist ETH<->BaseToken conversion ratios for chains with custom base tokens.
+    BaseTokenRatioPersister,
+    /// VM runner-based component that saves VM execution data for basic witness generation.
+    VmRunnerBwip,
 }
 
 #[derive(Debug)]
@@ -192,1211 +98,15 @@ impl FromStr for Components {
             "proof_data_handler" => Ok(Components(vec![Component::ProofDataHandler])),
             "consensus" => Ok(Components(vec![Component::Consensus])),
             "commitment_generator" => Ok(Components(vec![Component::CommitmentGenerator])),
+            "da_dispatcher" => Ok(Components(vec![Component::DADispatcher])),
             "vm_runner_protective_reads" => {
                 Ok(Components(vec![Component::VmRunnerProtectiveReads]))
             }
-            other => Err(format!("{} is not a valid component name", other)),
-        }
-    }
-}
-
-pub async fn initialize_components(
-    configs: &GeneralConfig,
-    wallets: &Wallets,
-    genesis_config: &GenesisConfig,
-    contracts_config: &ContractsConfig,
-    components: &[Component],
-    secrets: &Secrets,
-    consensus_config: Option<ConsensusConfig>,
-) -> anyhow::Result<(
-    Vec<JoinHandle<anyhow::Result<()>>>,
-    watch::Sender<bool>,
-    HealthCheckHandle,
-)> {
-    tracing::info!("Starting the components: {components:?}");
-    let l2_chain_id = genesis_config.l2_chain_id;
-    let db_config = configs.db_config.clone().context("db_config")?;
-    let postgres_config = configs.postgres_config.clone().context("postgres_config")?;
-    let database_secrets = secrets.database.clone().context("database_secrets")?;
-
-    if let Some(threshold) = postgres_config.slow_query_threshold() {
-        ConnectionPool::<Core>::global_config().set_slow_query_threshold(threshold)?;
-    }
-    if let Some(threshold) = postgres_config.long_connection_threshold() {
-        ConnectionPool::<Core>::global_config().set_long_connection_threshold(threshold)?;
-    }
-
-    let pool_size = postgres_config.max_connections()?;
-    let pool_size_master = postgres_config
-        .max_connections_master()
-        .unwrap_or(pool_size);
-
-    let connection_pool =
-        ConnectionPool::<Core>::builder(database_secrets.master_url()?, pool_size_master)
-            .build()
-            .await
-            .context("failed to build connection_pool")?;
-    // We're most interested in setting acquire / statement timeouts for the API server, which puts the most load
-    // on Postgres.
-    let replica_connection_pool =
-        ConnectionPool::<Core>::builder(database_secrets.replica_url()?, pool_size)
-            .set_acquire_timeout(postgres_config.acquire_timeout())
-            .set_statement_timeout(postgres_config.statement_timeout())
-            .build()
-            .await
-            .context("failed to build replica_connection_pool")?;
-
-    let health_check_config = configs
-        .api_config
-        .clone()
-        .context("api_config")?
-        .healthcheck;
-
-    let app_health = Arc::new(AppHealthCheck::new(
-        health_check_config.slow_time_limit(),
-        health_check_config.hard_time_limit(),
-    ));
-
-    let eth = configs.eth.clone().context("eth")?;
-    let l1_secrets = secrets.l1.clone().context("l1_secrets")?;
-    let circuit_breaker_config = configs
-        .circuit_breaker_config
-        .clone()
-        .context("circuit_breaker_config")?;
-
-    let circuit_breaker_checker = CircuitBreakerChecker::new(
-        Arc::new(
-            circuit_breakers_for_components(components, &database_secrets, &circuit_breaker_config)
-                .await
-                .context("circuit_breakers_for_components")?,
-        ),
-        circuit_breaker_config.sync_interval(),
-    );
-    circuit_breaker_checker.check().await.unwrap_or_else(|err| {
-        panic!("Circuit breaker triggered: {}", err);
-    });
-
-    let query_client = Client::http(l1_secrets.l1_rpc_url.clone())
-        .context("Ethereum client")?
-        .for_network(genesis_config.l1_chain_id.into())
-        .build();
-    let query_client = Box::new(query_client);
-    let gas_adjuster_config = eth.gas_adjuster.context("gas_adjuster")?;
-    let sender = eth.sender.as_ref().context("sender")?;
-
-    let mut gas_adjuster = GasAdjusterSingleton::new(
-        genesis_config.l1_chain_id,
-        l1_secrets.l1_rpc_url.clone(),
-        gas_adjuster_config,
-        sender.pubdata_sending_mode,
-        genesis_config.l1_batch_commit_data_generator_mode,
-    );
-
-    let (stop_sender, stop_receiver) = watch::channel(false);
-
-    // Prometheus exporter and circuit breaker checker should run for every component configuration.
-    let prom_config = configs
-        .prometheus_config
-        .clone()
-        .context("prometheus_config")?;
-    let prom_config = PrometheusExporterConfig::pull(prom_config.listener_port);
-
-    let (prometheus_health_check, prometheus_health_updater) =
-        ReactiveHealthCheck::new("prometheus_exporter");
-    app_health.insert_component(prometheus_health_check)?;
-    let prometheus_task = prom_config.run(stop_receiver.clone());
-    let prometheus_task = tokio::spawn(async move {
-        prometheus_health_updater.update(HealthStatus::Ready.into());
-        let res = prometheus_task.await;
-        drop(prometheus_health_updater);
-        res
-    });
-
-    let mut task_futures: Vec<JoinHandle<anyhow::Result<()>>> = vec![
-        prometheus_task,
-        tokio::spawn(circuit_breaker_checker.run(stop_receiver.clone())),
-    ];
-
-    if components.contains(&Component::WsApi)
-        || components.contains(&Component::HttpApi)
-        || components.contains(&Component::ContractVerificationApi)
-    {
-        let api_config = configs.api_config.clone().context("api_config")?;
-        let state_keeper_config = configs
-            .state_keeper_config
-            .clone()
-            .context("state_keeper_config")?;
-        let tx_sender_config = TxSenderConfig::new(
-            &state_keeper_config,
-            &api_config.web3_json_rpc,
-            wallets
-                .state_keeper
-                .clone()
-                .context("Fee account")?
-                .fee_account
-                .address(),
-            l2_chain_id,
-        );
-        let internal_api_config =
-            InternalApiConfig::new(&api_config.web3_json_rpc, contracts_config, genesis_config);
-
-        // Lazily initialize storage caches only when they are needed (e.g., skip their initialization
-        // if we only run the explorer APIs).
This is required because the cache update task will - // terminate immediately if storage caches are dropped, which will lead to the (unexpected) - // program termination. - let mut storage_caches = None; - - let mempool_cache = MempoolCache::new(api_config.web3_json_rpc.mempool_cache_size()); - let mempool_cache_update_task = mempool_cache.update_task( - connection_pool.clone(), - api_config.web3_json_rpc.mempool_cache_update_interval(), - ); - task_futures.push(tokio::spawn( - mempool_cache_update_task.run(stop_receiver.clone()), - )); - - if components.contains(&Component::HttpApi) { - storage_caches = Some( - build_storage_caches( - &api_config.web3_json_rpc, - &replica_connection_pool, - &mut task_futures, - stop_receiver.clone(), - ) - .context("build_storage_caches()")?, - ); - - let started_at = Instant::now(); - tracing::info!("Initializing HTTP API"); - let bounded_gas_adjuster = gas_adjuster - .get_or_init() - .await - .context("gas_adjuster.get_or_init()")?; - let batch_fee_input_provider = Arc::new(MainNodeFeeInputProvider::new( - bounded_gas_adjuster, - FeeModelConfig::from_state_keeper_config(&state_keeper_config), - )); - run_http_api( - &mut task_futures, - &app_health, - &database_secrets, - &tx_sender_config, - &state_keeper_config, - &internal_api_config, - &api_config, - connection_pool.clone(), - replica_connection_pool.clone(), - stop_receiver.clone(), - batch_fee_input_provider, - state_keeper_config.save_call_traces, - storage_caches.clone().unwrap(), - mempool_cache.clone(), - ) - .await - .context("run_http_api")?; - - let elapsed = started_at.elapsed(); - APP_METRICS.init_latency[&InitStage::HttpApi].set(elapsed); - tracing::info!( - "Initialized HTTP API on port {:?} in {elapsed:?}", - api_config.web3_json_rpc.http_port - ); - } - - if components.contains(&Component::WsApi) { - let storage_caches = match storage_caches { - Some(storage_caches) => storage_caches, - None => build_storage_caches( - &configs.api_config.clone().context("api")?.web3_json_rpc, - &replica_connection_pool, - &mut task_futures, - stop_receiver.clone(), - ) - .context("build_storage_caches()")?, - }; - - let started_at = Instant::now(); - tracing::info!("initializing WS API"); - let bounded_gas_adjuster = gas_adjuster - .get_or_init() - .await - .context("gas_adjuster.get_or_init()")?; - let batch_fee_input_provider = Arc::new(MainNodeFeeInputProvider::new( - bounded_gas_adjuster, - FeeModelConfig::from_state_keeper_config(&state_keeper_config), - )); - run_ws_api( - &mut task_futures, - &app_health, - &database_secrets, - &tx_sender_config, - &state_keeper_config, - &internal_api_config, - &api_config, - batch_fee_input_provider, - connection_pool.clone(), - replica_connection_pool.clone(), - stop_receiver.clone(), - storage_caches, - mempool_cache, - ) - .await - .context("run_ws_api")?; - - let elapsed = started_at.elapsed(); - APP_METRICS.init_latency[&InitStage::WsApi].set(elapsed); - tracing::info!( - "Initialized WS API on port {} in {elapsed:?}", - api_config.web3_json_rpc.ws_port - ); - } - - if components.contains(&Component::ContractVerificationApi) { - let started_at = Instant::now(); - tracing::info!("initializing contract verification REST API"); - task_futures.push(tokio::spawn( - zksync_contract_verification_server::start_server( - connection_pool.clone(), - replica_connection_pool.clone(), - configs - .contract_verifier - .clone() - .context("Contract verifier")?, - stop_receiver.clone(), - ), - )); - let elapsed = started_at.elapsed(); - 
APP_METRICS.init_latency[&InitStage::ContractVerificationApi].set(elapsed); - tracing::info!("initialized contract verification REST API in {elapsed:?}"); - } - } - - let object_store_config = configs - .core_object_store - .clone() - .context("core_object_store_config")?; - let store_factory = ObjectStoreFactory::new(object_store_config); - - if components.contains(&Component::StateKeeper) { - let started_at = Instant::now(); - tracing::info!("initializing State Keeper"); - let bounded_gas_adjuster = gas_adjuster - .get_or_init() - .await - .context("gas_adjuster.get_or_init()")?; - let state_keeper_config = configs - .state_keeper_config - .clone() - .context("state_keeper_config")?; - let batch_fee_input_provider = Arc::new(MainNodeFeeInputProvider::new( - bounded_gas_adjuster, - FeeModelConfig::from_state_keeper_config(&state_keeper_config), - )); - add_state_keeper_to_task_futures( - &mut task_futures, - &database_secrets, - contracts_config, - state_keeper_config, - wallets - .state_keeper - .clone() - .context("State keeper wallets")?, - l2_chain_id, - &db_config, - &configs.mempool_config.clone().context("mempool_config")?, - batch_fee_input_provider, - stop_receiver.clone(), - ) - .await - .context("add_state_keeper_to_task_futures()")?; - - let elapsed = started_at.elapsed(); - APP_METRICS.init_latency[&InitStage::StateKeeper].set(elapsed); - tracing::info!("initialized State Keeper in {elapsed:?}"); - } - - let diamond_proxy_addr = contracts_config.diamond_proxy_addr; - let state_transition_manager_addr = contracts_config - .ecosystem_contracts - .as_ref() - .map(|a| a.state_transition_proxy_addr); - - if components.contains(&Component::Consensus) { - let cfg = consensus_config - .clone() - .context("consensus component's config is missing")?; - let secrets = secrets - .consensus - .clone() - .context("consensus component's secrets are missing")?; - let started_at = Instant::now(); - tracing::info!("initializing Consensus"); - let pool = connection_pool.clone(); - let mut stop_receiver = stop_receiver.clone(); - task_futures.push(tokio::spawn(async move { - // We instantiate the root context here, since the consensus task is the only user of the - // structured concurrency framework. - // Note, however, that awaiting for the `stop_receiver` is related to the root context behavior, - // not the consensus task itself. There may have been any number of tasks running in the root context, - // but we only need to wait for stop signal once, and it will be propagated to all child contexts. - let root_ctx = ctx::root(); - scope::run!(&root_ctx, |ctx, s| async move { - s.spawn_bg(zksync_node_consensus::era::run_main_node( - ctx, cfg, secrets, pool, - )); - let _ = stop_receiver.wait_for(|stop| *stop).await?; - Ok(()) - }) - .await - })); - - let elapsed = started_at.elapsed(); - APP_METRICS.init_latency[&InitStage::Consensus].set(elapsed); - tracing::info!("initialized Consensus in {elapsed:?}"); - } - - if components.contains(&Component::EthWatcher) { - let started_at = Instant::now(); - tracing::info!("initializing ETH-Watcher"); - let eth_watch_pool = ConnectionPool::::singleton(database_secrets.master_url()?) - .build() - .await - .context("failed to build eth_watch_pool")?; - let governance = (governance_contract(), contracts_config.governance_addr); - let eth_watch_config = configs - .eth - .clone() - .context("eth_config")? 
- .watcher - .context("watcher")?; - task_futures.push( - start_eth_watch( - eth_watch_config, - eth_watch_pool, - query_client.clone(), - diamond_proxy_addr, - state_transition_manager_addr, - governance, - stop_receiver.clone(), - ) - .await - .context("start_eth_watch()")?, - ); - let elapsed = started_at.elapsed(); - APP_METRICS.init_latency[&InitStage::EthWatcher].set(elapsed); - tracing::info!("initialized ETH-Watcher in {elapsed:?}"); - } - - if components.contains(&Component::EthTxAggregator) { - let started_at = Instant::now(); - tracing::info!("initializing ETH-TxAggregator"); - let eth_sender_pool = ConnectionPool::::singleton(database_secrets.master_url()?) - .build() - .await - .context("failed to build eth_sender_pool")?; - - let eth_sender_wallets = wallets.eth_sender.clone().context("eth_sender")?; - let operator_private_key = eth_sender_wallets.operator.private_key(); - let diamond_proxy_addr = contracts_config.diamond_proxy_addr; - let default_priority_fee_per_gas = eth - .gas_adjuster - .as_ref() - .context("gas_adjuster")? - .default_priority_fee_per_gas; - let l1_chain_id = genesis_config.l1_chain_id; - - let eth_client = PKSigningClient::new_raw( - operator_private_key.clone(), - diamond_proxy_addr, - default_priority_fee_per_gas, - l1_chain_id, - query_client.clone(), - ); - - let l1_batch_commit_data_generator_mode = - genesis_config.l1_batch_commit_data_generator_mode; - // Run the task synchronously: the main node is expected to have a stable Ethereum client connection, - // and the cost of detecting an incorrect mode with a delay is higher. - L1BatchCommitmentModeValidationTask::new( - contracts_config.diamond_proxy_addr, - l1_batch_commit_data_generator_mode, - query_client.clone(), - ) - .exit_on_success() - .run(stop_receiver.clone()) - .await?; - - let operator_blobs_address = eth_sender_wallets.blob_operator.map(|x| x.address()); - - let sender_config = eth.sender.clone().context("eth_sender")?; - let eth_tx_aggregator_actor = EthTxAggregator::new( - eth_sender_pool, - sender_config.clone(), - Aggregator::new( - sender_config.clone(), - store_factory.create_store().await?, - operator_blobs_address.is_some(), - l1_batch_commit_data_generator_mode, - ), - Box::new(eth_client), - contracts_config.validator_timelock_addr, - contracts_config.l1_multicall3_addr, - diamond_proxy_addr, - l2_chain_id, - operator_blobs_address, - ) - .await; - task_futures.push(tokio::spawn( - eth_tx_aggregator_actor.run(stop_receiver.clone()), - )); - let elapsed = started_at.elapsed(); - APP_METRICS.init_latency[&InitStage::EthTxAggregator].set(elapsed); - tracing::info!("initialized ETH-TxAggregator in {elapsed:?}"); - } - - if components.contains(&Component::EthTxManager) { - let started_at = Instant::now(); - tracing::info!("initializing ETH-TxManager"); - let eth_manager_pool = ConnectionPool::::singleton(database_secrets.master_url()?) - .build() - .await - .context("failed to build eth_manager_pool")?; - let eth_sender = configs.eth.clone().context("eth_sender_config")?; - let eth_sender_wallets = wallets.eth_sender.clone().context("eth_sender")?; - let operator_private_key = eth_sender_wallets.operator.private_key(); - let diamond_proxy_addr = contracts_config.diamond_proxy_addr; - let default_priority_fee_per_gas = eth - .gas_adjuster - .as_ref() - .context("gas_adjuster")? 
- .default_priority_fee_per_gas; - let l1_chain_id = genesis_config.l1_chain_id; - - let eth_client = PKSigningClient::new_raw( - operator_private_key.clone(), - diamond_proxy_addr, - default_priority_fee_per_gas, - l1_chain_id, - query_client.clone(), - ); - - let eth_client_blobs = if let Some(blob_operator) = eth_sender_wallets.blob_operator { - let operator_blob_private_key = blob_operator.private_key().clone(); - let client = Box::new(PKSigningClient::new_raw( - operator_blob_private_key, - diamond_proxy_addr, - default_priority_fee_per_gas, - l1_chain_id, - query_client, - )); - Some(client as Box) - } else { - None - }; - - let eth_tx_manager_actor = EthTxManager::new( - eth_manager_pool, - eth_sender.sender.clone().context("eth_sender")?, - gas_adjuster - .get_or_init() - .await - .context("gas_adjuster.get_or_init()")?, - Box::new(eth_client), - eth_client_blobs, - ); - task_futures.extend([tokio::spawn( - eth_tx_manager_actor.run(stop_receiver.clone()), - )]); - let elapsed = started_at.elapsed(); - APP_METRICS.init_latency[&InitStage::EthTxManager].set(elapsed); - tracing::info!("initialized ETH-TxManager in {elapsed:?}"); - } - - add_trees_to_task_futures( - configs, - secrets, - &mut task_futures, - &app_health, - components, - &store_factory, - stop_receiver.clone(), - ) - .await - .context("add_trees_to_task_futures()")?; - - // FIXME: return `if components.contains(&Component::TeeVerifierInputProducer)` (not supported by new VM) - if false { - let singleton_connection_pool = - ConnectionPool::::singleton(database_secrets.master_url()?) - .build() - .await - .context("failed to build singleton connection_pool")?; - add_tee_verifier_input_producer_to_task_futures( - &mut task_futures, - &singleton_connection_pool, - &store_factory, - l2_chain_id, - stop_receiver.clone(), - ) - .await - .context("add_tee_verifier_input_producer_to_task_futures()")?; - } - - if components.contains(&Component::Housekeeper) { - add_house_keeper_to_task_futures( - configs, - secrets, - &mut task_futures, - stop_receiver.clone(), - ) - .await - .context("add_house_keeper_to_task_futures()")?; - } - - if components.contains(&Component::ProofDataHandler) { - task_futures.push(tokio::spawn(zksync_proof_data_handler::run_server( - configs - .proof_data_handler_config - .clone() - .context("proof_data_handler_config")?, - store_factory.create_store().await?, - connection_pool.clone(), - genesis_config.l1_batch_commit_data_generator_mode, - stop_receiver.clone(), - ))); - } - - if components.contains(&Component::CommitmentGenerator) { - let pool_size = CommitmentGenerator::default_parallelism().get(); - let commitment_generator_pool = - ConnectionPool::::builder(database_secrets.master_url()?, pool_size) - .build() - .await - .context("failed to build commitment_generator_pool")?; - let commitment_generator = CommitmentGenerator::new( - commitment_generator_pool, - genesis_config.l1_batch_commit_data_generator_mode, - ); - app_health.insert_component(commitment_generator.health_check())?; - task_futures.push(tokio::spawn( - commitment_generator.run(stop_receiver.clone()), - )); - } - - // Run healthcheck server for all components. 
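The healthcheck wiring below aggregates per-component checks into one application-level status. A rough, self-contained sketch of that aggregation (simplified stand-ins for `AppHealthCheck` and `HealthStatus`, not the crate's real types):

use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq)]
enum HealthStatus {
    Ready,
    NotReady,
}

#[derive(Default)]
struct AppHealth {
    components: HashMap<&'static str, HealthStatus>,
}

impl AppHealth {
    fn insert_component(&mut self, name: &'static str, status: HealthStatus) {
        self.components.insert(name, status);
    }

    // Aggregate: the app is ready only if every registered component is.
    fn check(&self) -> HealthStatus {
        if self.components.values().all(|&s| s == HealthStatus::Ready) {
            HealthStatus::Ready
        } else {
            HealthStatus::NotReady
        }
    }
}

fn main() {
    let mut app_health = AppHealth::default();
    app_health.insert_component("replica_pool", HealthStatus::Ready);
    app_health.insert_component("prometheus_exporter", HealthStatus::NotReady);
    assert_eq!(app_health.check(), HealthStatus::NotReady);
}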
- let db_health_check = ConnectionPoolHealthCheck::new(replica_connection_pool); - app_health.insert_custom_component(Arc::new(db_health_check))?; - let health_check_handle = - HealthCheckHandle::spawn_server(health_check_config.bind_addr(), app_health); - - if let Some(task) = gas_adjuster.run_if_initialized(stop_receiver.clone()) { - task_futures.push(task); - } - - Ok((task_futures, stop_sender, health_check_handle)) -} - -#[allow(clippy::too_many_arguments)] -async fn add_state_keeper_to_task_futures( - task_futures: &mut Vec>>, - database_secrets: &DatabaseSecrets, - contracts_config: &ContractsConfig, - state_keeper_config: StateKeeperConfig, - state_keeper_wallets: wallets::StateKeeper, - l2chain_id: L2ChainId, - db_config: &DBConfig, - mempool_config: &MempoolConfig, - batch_fee_input_provider: Arc, - stop_receiver: watch::Receiver, -) -> anyhow::Result<()> { - let state_keeper_pool = ConnectionPool::::singleton(database_secrets.master_url()?) - .build() - .await - .context("failed to build state_keeper_pool")?; - let mempool = { - let mut storage = state_keeper_pool - .connection() - .await - .context("Access storage to build mempool")?; - let mempool = MempoolGuard::from_storage(&mut storage, mempool_config.capacity).await; - mempool.register_metrics(); - mempool - }; - - // L2 Block sealing process is parallelized, so we have to provide enough pooled connections. - let persistence_pool = ConnectionPool::::builder( - database_secrets.master_url()?, - L2BlockSealProcess::subtasks_len(), - ) - .build() - .await - .context("failed to build l2_block_sealer_pool")?; - let (persistence, l2_block_sealer) = StateKeeperPersistence::new( - persistence_pool.clone(), - contracts_config - .l2_shared_bridge_addr - .context("`l2_shared_bridge_addr` config is missing")?, - state_keeper_config.l2_block_seal_queue_capacity, - ); - task_futures.push(tokio::spawn(l2_block_sealer.run())); - - // One (potentially held long-term) connection for `AsyncCatchupTask` and another connection - // to access `AsyncRocksdbCache` as a storage. - let async_cache_pool = ConnectionPool::::builder(database_secrets.master_url()?, 2) - .build() - .await - .context("failed to build async_cache_pool")?; - let cache_options = RocksdbStorageOptions { - block_cache_capacity: db_config - .experimental - .state_keeper_db_block_cache_capacity(), - max_open_files: db_config.experimental.state_keeper_db_max_open_files, - }; - let (async_cache, async_catchup_task) = AsyncRocksdbCache::new( - async_cache_pool, - db_config.state_keeper_db_path.clone(), - cache_options, - ); - - let tree_writes_persistence = TreeWritesPersistence::new(persistence_pool); - let output_handler = - OutputHandler::new(Box::new(persistence)).with_handler(Box::new(tree_writes_persistence)); - let state_keeper = create_state_keeper( - state_keeper_config, - state_keeper_wallets, - async_cache, - l2chain_id, - mempool_config, - state_keeper_pool, - mempool.clone(), - batch_fee_input_provider.clone(), - output_handler, - stop_receiver.clone(), - ) - .await; - - let mut stop_receiver_clone = stop_receiver.clone(); - task_futures.push(tokio::task::spawn(async move { - let result = async_catchup_task.run(stop_receiver_clone.clone()).await; - stop_receiver_clone.changed().await?; - result - })); - task_futures.push(tokio::spawn(state_keeper.run())); - - let mempool_fetcher_pool = ConnectionPool::::singleton(database_secrets.master_url()?) 
- .build() - .await - .context("failed to build mempool_fetcher_pool")?; - let mempool_fetcher = MempoolFetcher::new( - mempool, - batch_fee_input_provider, - mempool_config, - mempool_fetcher_pool, - ); - let mempool_fetcher_handle = tokio::spawn(mempool_fetcher.run(stop_receiver)); - task_futures.push(mempool_fetcher_handle); - Ok(()) -} - -pub async fn start_eth_watch( - config: EthWatchConfig, - pool: ConnectionPool, - eth_gateway: Box>, - diamond_proxy_addr: Address, - state_transition_manager_addr: Option
<Address>
, - governance: (Contract, Address), - stop_receiver: watch::Receiver, -) -> anyhow::Result>> { - let eth_client = EthHttpQueryClient::new( - eth_gateway, - diamond_proxy_addr, - state_transition_manager_addr, - governance.1, - config.confirmations_for_eth_event, - ); - - let eth_watch = EthWatch::new( - diamond_proxy_addr, - &governance.0, - Box::new(eth_client), - pool, - config.poll_interval(), - ) - .await?; - - Ok(tokio::spawn(eth_watch.run(stop_receiver))) -} - -async fn add_trees_to_task_futures( - configs: &GeneralConfig, - secrets: &Secrets, - task_futures: &mut Vec>>, - app_health: &AppHealthCheck, - components: &[Component], - store_factory: &ObjectStoreFactory, - stop_receiver: watch::Receiver, -) -> anyhow::Result<()> { - if !components.contains(&Component::Tree) { - anyhow::ensure!( - !components.contains(&Component::TreeApi), - "Merkle tree API cannot be started without a tree component" - ); - return Ok(()); - } - - let db_config = configs.db_config.clone().context("db_config")?; - let database_secrets = secrets.database.clone().context("database_secrets")?; - let operation_config = configs - .operations_manager_config - .clone() - .context("operations_manager_config")?; - let api_config = configs - .api_config - .clone() - .context("api_config")? - .merkle_tree; - let api_config = components - .contains(&Component::TreeApi) - .then_some(&api_config); - - let object_store = match db_config.merkle_tree.mode { - MerkleTreeMode::Lightweight => None, - MerkleTreeMode::Full => Some(store_factory.create_store().await?), - }; - - run_tree( - task_futures, - app_health, - &database_secrets, - &db_config.merkle_tree, - api_config, - &operation_config, - object_store, - stop_receiver, - ) - .await - .context("run_tree()") -} - -#[allow(clippy::too_many_arguments)] -async fn run_tree( - task_futures: &mut Vec>>, - app_health: &AppHealthCheck, - database_secrets: &DatabaseSecrets, - merkle_tree_config: &MerkleTreeConfig, - api_config: Option<&MerkleTreeApiConfig>, - operation_manager: &OperationsManagerConfig, - object_store: Option>, - stop_receiver: watch::Receiver, -) -> anyhow::Result<()> { - let started_at = Instant::now(); - let mode_str = if matches!(merkle_tree_config.mode, MerkleTreeMode::Full) { - "full" - } else { - "lightweight" - }; - tracing::info!("Initializing Merkle tree in {mode_str} mode"); - - let config = MetadataCalculatorConfig::for_main_node(merkle_tree_config, operation_manager); - let pool = ConnectionPool::singleton(database_secrets.master_url()?) - .build() - .await - .context("failed to build connection pool for Merkle tree")?; - // The number of connections in a recovery pool is based on the mainnet recovery runs. It doesn't need - // to be particularly accurate at this point, since the main node isn't expected to recover from a snapshot. - let recovery_pool = ConnectionPool::builder(database_secrets.replica_url()?, 10) - .build() - .await - .context("failed to build connection pool for Merkle tree recovery")?; - let metadata_calculator = MetadataCalculator::new(config, object_store, pool) - .await - .context("failed initializing metadata_calculator")? - .with_recovery_pool(recovery_pool); - - if let Some(api_config) = api_config { - let address = (Ipv4Addr::UNSPECIFIED, api_config.port).into(); - let tree_reader = metadata_calculator.tree_reader(); - let stop_receiver = stop_receiver.clone(); - task_futures.push(tokio::spawn(async move { - tree_reader - .wait() - .await - .context("Cannot initialize tree reader")? 
- .run_api_server(address, stop_receiver) - .await - })); - } - - let tree_health_check = metadata_calculator.tree_health_check(); - app_health.insert_custom_component(Arc::new(tree_health_check))?; - let tree_task = tokio::spawn(metadata_calculator.run(stop_receiver)); - task_futures.push(tree_task); - - let elapsed = started_at.elapsed(); - APP_METRICS.init_latency[&InitStage::Tree].set(elapsed); - tracing::info!("Initialized {mode_str} tree in {elapsed:?}"); - Ok(()) -} - -async fn add_tee_verifier_input_producer_to_task_futures( - task_futures: &mut Vec>>, - connection_pool: &ConnectionPool, - store_factory: &ObjectStoreFactory, - l2_chain_id: L2ChainId, - stop_receiver: watch::Receiver, -) -> anyhow::Result<()> { - let started_at = Instant::now(); - tracing::info!("initializing TeeVerifierInputProducer"); - let producer = TeeVerifierInputProducer::new( - connection_pool.clone(), - store_factory.create_store().await?, - l2_chain_id, - ) - .await?; - task_futures.push(tokio::spawn(producer.run(stop_receiver, None))); - tracing::info!( - "Initialized TeeVerifierInputProducer in {:?}", - started_at.elapsed() - ); - let elapsed = started_at.elapsed(); - APP_METRICS.init_latency[&InitStage::TeeVerifierInputProducer].set(elapsed); - Ok(()) -} - -async fn add_house_keeper_to_task_futures( - configs: &GeneralConfig, - secrets: &Secrets, - task_futures: &mut Vec>>, - stop_receiver: watch::Receiver, -) -> anyhow::Result<()> { - let house_keeper_config = configs - .house_keeper_config - .clone() - .context("house_keeper_config")?; - let postgres_config = configs.postgres_config.clone().context("postgres_config")?; - let secrets = secrets.database.clone().context("database_secrets")?; - let connection_pool = - ConnectionPool::::builder(secrets.replica_url()?, postgres_config.max_connections()?) - .build() - .await - .context("failed to build a connection pool")?; - - let pool_for_metrics = connection_pool.clone(); - let mut stop_receiver_for_metrics = stop_receiver.clone(); - task_futures.push(tokio::spawn(async move { - tokio::select! { - () = PostgresMetrics::run_scraping(pool_for_metrics, Duration::from_secs(60)) => { - tracing::warn!("Postgres metrics scraping unexpectedly stopped"); - } - _ = stop_receiver_for_metrics.changed() => { - tracing::info!("Stop signal received, Postgres metrics scraping is shutting down"); + "base_token_ratio_persister" => { + Ok(Components(vec![Component::BaseTokenRatioPersister])) } + "vm_runner_bwip" => Ok(Components(vec![Component::VmRunnerBwip])), + other => Err(format!("{} is not a valid component name", other)), } - Ok(()) - })); - - let l1_batch_metrics_reporter = L1BatchMetricsReporter::new( - house_keeper_config.l1_batch_metrics_reporting_interval_ms, - connection_pool.clone(), - ); - - let prover_connection_pool = ConnectionPool::::builder( - secrets.prover_url()?, - postgres_config.max_connections()?, - ) - .build() - .await - .context("failed to build a prover_connection_pool")?; - let task = l1_batch_metrics_reporter.run(stop_receiver.clone()); - task_futures.push(tokio::spawn(task)); - - // All FRI Prover related components are configured below. 
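The FRI retry managers created below all share one shape: on a fixed interval, jobs stuck in progress past their processing timeout are moved back to the queue while they still have attempts left. A stand-alone sketch of that idea (types and field names are illustrative, not the crate's):

use std::time::{Duration, Instant};

#[derive(Debug)]
struct Job {
    attempts: u32,
    in_progress_since: Option<Instant>,
}

struct RetryManager {
    max_attempts: u32,
    processing_timeout: Duration,
}

impl RetryManager {
    // Requeue timed-out jobs; returns how many were retried.
    fn requeue_stuck_jobs(&self, jobs: &mut [Job]) -> usize {
        let mut retried = 0;
        for job in jobs.iter_mut() {
            let Some(started) = job.in_progress_since else { continue };
            if started.elapsed() >= self.processing_timeout && job.attempts < self.max_attempts {
                job.in_progress_since = None; // back to the queue
                job.attempts += 1;
                retried += 1;
            }
        }
        retried
    }
}

fn main() {
    let manager = RetryManager {
        max_attempts: 10,
        processing_timeout: Duration::from_secs(0),
    };
    let mut jobs = vec![Job { attempts: 1, in_progress_since: Some(Instant::now()) }];
    assert_eq!(manager.requeue_stuck_jobs(&mut jobs), 1);
    assert!(jobs[0].in_progress_since.is_none());
}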
- let fri_prover_config = configs.prover_config.clone().context("fri_prover_config")?; - let fri_prover_job_retry_manager = FriProverJobRetryManager::new( - fri_prover_config.max_attempts, - fri_prover_config.proof_generation_timeout(), - house_keeper_config.prover_job_retrying_interval_ms, - prover_connection_pool.clone(), - ); - let task = fri_prover_job_retry_manager.run(stop_receiver.clone()); - task_futures.push(tokio::spawn(task)); - - let fri_witness_gen_config = configs - .witness_generator - .clone() - .context("fri_witness_generator_config")?; - let fri_witness_gen_job_retry_manager = FriWitnessGeneratorJobRetryManager::new( - fri_witness_gen_config.max_attempts, - fri_witness_gen_config.witness_generation_timeouts(), - house_keeper_config.witness_generator_job_retrying_interval_ms, - prover_connection_pool.clone(), - ); - let task = fri_witness_gen_job_retry_manager.run(stop_receiver.clone()); - task_futures.push(tokio::spawn(task)); - - let waiting_to_queued_fri_witness_job_mover = WaitingToQueuedFriWitnessJobMover::new( - house_keeper_config.witness_job_moving_interval_ms, - prover_connection_pool.clone(), - ); - let task = waiting_to_queued_fri_witness_job_mover.run(stop_receiver.clone()); - task_futures.push(tokio::spawn(task)); - - let fri_witness_generator_stats_reporter = FriWitnessGeneratorQueueReporter::new( - prover_connection_pool.clone(), - house_keeper_config.witness_generator_stats_reporting_interval_ms, - ); - let task = fri_witness_generator_stats_reporter.run(stop_receiver.clone()); - task_futures.push(tokio::spawn(task)); - - // TODO(PLA-862): remove after fields become required - if let Some((archiving_interval, archive_after)) = - house_keeper_config.prover_job_archiver_params() - { - let fri_prover_jobs_archiver = FriProverJobsArchiver::new( - prover_connection_pool.clone(), - archiving_interval, - archive_after, - ); - let task = fri_prover_jobs_archiver.run(stop_receiver.clone()); - task_futures.push(tokio::spawn(task)); - } - - if let Some((archiving_interval, archive_after)) = - house_keeper_config.fri_gpu_prover_archiver_params() - { - let fri_gpu_prover_jobs_archiver = FriGpuProverArchiver::new( - prover_connection_pool.clone(), - archiving_interval, - archive_after, - ); - let task = fri_gpu_prover_jobs_archiver.run(stop_receiver.clone()); - task_futures.push(tokio::spawn(task)); - } - - let fri_prover_group_config = configs - .prover_group_config - .clone() - .context("fri_prover_group_config")?; - let fri_prover_stats_reporter = FriProverQueueReporter::new( - house_keeper_config.prover_stats_reporting_interval_ms, - prover_connection_pool.clone(), - connection_pool.clone(), - fri_prover_group_config, - ); - let task = fri_prover_stats_reporter.run(stop_receiver.clone()); - task_futures.push(tokio::spawn(task)); - - let proof_compressor_config = configs - .proof_compressor_config - .clone() - .context("fri_proof_compressor_config")?; - let fri_proof_compressor_stats_reporter = FriProofCompressorQueueReporter::new( - house_keeper_config.proof_compressor_stats_reporting_interval_ms, - prover_connection_pool.clone(), - ); - let task = fri_proof_compressor_stats_reporter.run(stop_receiver.clone()); - task_futures.push(tokio::spawn(task)); - - let fri_proof_compressor_retry_manager = FriProofCompressorJobRetryManager::new( - proof_compressor_config.max_attempts, - proof_compressor_config.generation_timeout(), - house_keeper_config.proof_compressor_job_retrying_interval_ms, - prover_connection_pool.clone(), - ); - let task = 
fri_proof_compressor_retry_manager.run(stop_receiver); - task_futures.push(tokio::spawn(task)); - Ok(()) -} - -fn build_storage_caches( - rpc_config: &Web3JsonRpcConfig, - replica_connection_pool: &ConnectionPool, - task_futures: &mut Vec>>, - stop_receiver: watch::Receiver, -) -> anyhow::Result { - let factory_deps_capacity = rpc_config.factory_deps_cache_size() as u64; - let initial_writes_capacity = rpc_config.initial_writes_cache_size() as u64; - let values_capacity = rpc_config.latest_values_cache_size() as u64; - let mut storage_caches = - PostgresStorageCaches::new(factory_deps_capacity, initial_writes_capacity); - - if values_capacity > 0 { - let values_cache_task = storage_caches - .configure_storage_values_cache(values_capacity, replica_connection_pool.clone()); - task_futures.push(tokio::task::spawn(values_cache_task.run(stop_receiver))); - } - Ok(storage_caches) -} - -#[allow(clippy::too_many_arguments)] -async fn run_http_api( - task_futures: &mut Vec>>, - app_health: &AppHealthCheck, - database_secrets: &DatabaseSecrets, - tx_sender_config: &TxSenderConfig, - state_keeper_config: &StateKeeperConfig, - internal_api: &InternalApiConfig, - api_config: &ApiConfig, - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - stop_receiver: watch::Receiver, - batch_fee_model_input_provider: Arc, - with_debug_namespace: bool, - storage_caches: PostgresStorageCaches, - mempool_cache: MempoolCache, -) -> anyhow::Result<()> { - let (tx_sender, vm_barrier) = build_tx_sender( - tx_sender_config, - &api_config.web3_json_rpc, - state_keeper_config, - replica_connection_pool.clone(), - master_connection_pool, - batch_fee_model_input_provider, - storage_caches, - ) - .await?; - - let mut namespaces = Namespace::DEFAULT.to_vec(); - if with_debug_namespace { - namespaces.push(Namespace::Debug) - } - namespaces.push(Namespace::Snapshots); - - let updaters_pool = ConnectionPool::::builder(database_secrets.replica_url()?, 2) - .build() - .await - .context("failed to build updaters_pool")?; - - let mut api_builder = - web3::ApiBuilder::jsonrpsee_backend(internal_api.clone(), replica_connection_pool) - .http(api_config.web3_json_rpc.http_port) - .with_updaters_pool(updaters_pool) - .with_filter_limit(api_config.web3_json_rpc.filters_limit()) - .with_batch_request_size_limit(api_config.web3_json_rpc.max_batch_request_size()) - .with_response_body_size_limit(api_config.web3_json_rpc.max_response_body_size()) - .with_tx_sender(tx_sender) - .with_vm_barrier(vm_barrier) - .with_mempool_cache(mempool_cache) - .enable_api_namespaces(namespaces); - if let Some(tree_api_url) = api_config.web3_json_rpc.tree_api_url() { - let tree_api = Arc::new(TreeApiHttpClient::new(tree_api_url)); - api_builder = api_builder.with_tree_api(tree_api.clone()); - app_health.insert_custom_component(tree_api)?; - } - - let server_handles = api_builder - .build() - .context("failed to build HTTP API server")? 
- .run(stop_receiver) - .await?; - task_futures.extend(server_handles.tasks); - app_health.insert_component(server_handles.health_check)?; - Ok(()) -} - -#[allow(clippy::too_many_arguments)] -async fn run_ws_api( - task_futures: &mut Vec>>, - app_health: &AppHealthCheck, - database_secrets: &DatabaseSecrets, - tx_sender_config: &TxSenderConfig, - state_keeper_config: &StateKeeperConfig, - internal_api: &InternalApiConfig, - api_config: &ApiConfig, - batch_fee_model_input_provider: Arc, - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - stop_receiver: watch::Receiver, - storage_caches: PostgresStorageCaches, - mempool_cache: MempoolCache, -) -> anyhow::Result<()> { - let (tx_sender, vm_barrier) = build_tx_sender( - tx_sender_config, - &api_config.web3_json_rpc, - state_keeper_config, - replica_connection_pool.clone(), - master_connection_pool, - batch_fee_model_input_provider, - storage_caches, - ) - .await?; - let updaters_pool = ConnectionPool::::singleton(database_secrets.replica_url()?) - .build() - .await - .context("failed to build updaters_pool")?; - - let mut namespaces = Namespace::DEFAULT.to_vec(); - namespaces.push(Namespace::Snapshots); - - let mut api_builder = - web3::ApiBuilder::jsonrpsee_backend(internal_api.clone(), replica_connection_pool) - .ws(api_config.web3_json_rpc.ws_port) - .with_updaters_pool(updaters_pool) - .with_filter_limit(api_config.web3_json_rpc.filters_limit()) - .with_subscriptions_limit(api_config.web3_json_rpc.subscriptions_limit()) - .with_batch_request_size_limit(api_config.web3_json_rpc.max_batch_request_size()) - .with_response_body_size_limit(api_config.web3_json_rpc.max_response_body_size()) - .with_websocket_requests_per_minute_limit( - api_config - .web3_json_rpc - .websocket_requests_per_minute_limit(), - ) - .with_polling_interval(api_config.web3_json_rpc.pubsub_interval()) - .with_tx_sender(tx_sender) - .with_vm_barrier(vm_barrier) - .with_mempool_cache(mempool_cache) - .enable_api_namespaces(namespaces); - if let Some(tree_api_url) = api_config.web3_json_rpc.tree_api_url() { - let tree_api = Arc::new(TreeApiHttpClient::new(tree_api_url)); - api_builder = api_builder.with_tree_api(tree_api.clone()); - app_health.insert_custom_component(tree_api)?; - } - - let server_handles = api_builder - .build() - .context("failed to build WS API server")? - .run(stop_receiver) - .await?; - task_futures.extend(server_handles.tasks); - app_health.insert_component(server_handles.health_check)?; - Ok(()) -} - -async fn circuit_breakers_for_components( - components: &[Component], - database_secrets: &DatabaseSecrets, - circuit_breaker_config: &CircuitBreakerConfig, -) -> anyhow::Result { - let circuit_breakers = CircuitBreakers::default(); - - if components - .iter() - .any(|c| matches!(c, Component::EthTxAggregator | Component::EthTxManager)) - { - let pool = ConnectionPool::::singleton(database_secrets.replica_url()?) - .build() - .await - .context("failed to build a connection pool")?; - circuit_breakers - .insert(Box::new(FailedL1TransactionChecker { pool })) - .await; - } - - if components.iter().any(|c| { - matches!( - c, - Component::HttpApi | Component::WsApi | Component::ContractVerificationApi - ) - }) { - let pool = ConnectionPool::::singleton(database_secrets.replica_url()?) 
-            .build()
-            .await?;
-        circuit_breakers
-            .insert(Box::new(ReplicationLagChecker {
-                pool,
-                replication_lag_limit: circuit_breaker_config.replication_lag_limit(),
-            }))
-            .await;
     }
-    Ok(circuit_breakers)
 }
diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs
index 1f4c410ed9c1..1ad688ed14cb 100644
--- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs
+++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs
@@ -1,3 +1,6 @@
+use std::path::PathBuf;
+
+use anyhow::Context;
 use zksync_config::{
     configs::{
         api::{HealthCheckConfig, MerkleTreeApiConfig, Web3JsonRpcConfig},
@@ -7,28 +10,33 @@ use zksync_config::{
         },
         fri_prover_group::FriProverGroupConfig,
         house_keeper::HouseKeeperConfig,
+        vm_runner::BasicWitnessInputProducerConfig,
         wallets::{AddressWallet, EthSender, StateKeeper, Wallet, Wallets},
+        CommitmentGeneratorConfig, DatabaseSecrets, ExternalPriceApiClientConfig,
         FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig,
         FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig,
         ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig,
+        PruningConfig, SnapshotRecoveryConfig,
     },
-    ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig,
-    ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig,
+    ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig,
+    EthConfig, EthWatchConfig, GasAdjusterConfig, ObjectStoreConfig, PostgresConfig,
+    SnapshotsCreatorConfig,
 };
-use zksync_protobuf::{repr::ProtoRepr, ProtoFmt};
-
-pub fn decode_yaml<T: ProtoFmt>(yaml: &str) -> anyhow::Result<T> {
-    let d = serde_yaml::Deserializer::from_str(yaml);
-    let this: T = zksync_protobuf::serde::deserialize(d)?;
-    Ok(this)
-}
+use zksync_env_config::FromEnv;
+use zksync_protobuf::repr::ProtoRepr;
+use zksync_protobuf_config::proto::secrets::Secrets;
 
 pub fn decode_yaml_repr<T: ProtoRepr>(yaml: &str) -> anyhow::Result<T::Type> {
     let d = serde_yaml::Deserializer::from_str(yaml);
     let this: T = zksync_protobuf::serde::deserialize_proto_with_options(d, false)?;
     this.read()
 }
-//
+
+pub fn read_yaml_repr<T: ProtoRepr>(path_buf: PathBuf) -> anyhow::Result<T::Type> {
+    let yaml = std::fs::read_to_string(path_buf).context("failed reading YAML config")?;
+    decode_yaml_repr::<T>(&yaml)
+}
+
 // TODO (QIT-22): This structure is going to be removed when components will be responsible for their own configs.
 /// A temporary config store allowing to pass deserialized configs from `zksync_server` to `zksync_core`.
 /// All the configs are optional, since for some component combination it is not needed to pass all the configs.
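Each field being optional is what makes the env-based fallback work: every config type exposes a fallible `from_env()`-style constructor, and the store downgrades failures to `None` so components that never read that config are unaffected. A minimal sketch of the pattern (simplified config type; the `DATABASE_POOL_SIZE` variable name is illustrative):

use anyhow::Context as _;

#[derive(Debug, Clone)]
struct PostgresConfig {
    max_connections: u32,
}

impl PostgresConfig {
    // Mirrors the `FromEnv` style: a missing or invalid variable is an error,
    // which the store then downgrades to `None` via `.ok()`.
    fn from_env() -> anyhow::Result<Self> {
        let raw = std::env::var("DATABASE_POOL_SIZE").context("DATABASE_POOL_SIZE is not set")?;
        Ok(Self {
            max_connections: raw.parse().context("DATABASE_POOL_SIZE must be an integer")?,
        })
    }
}

#[derive(Debug, Default)]
struct TempConfigStore {
    postgres_config: Option<PostgresConfig>,
}

fn load_env_config() -> TempConfigStore {
    TempConfigStore {
        // A component that needs Postgres will `.context("postgres_config")?`
        // on this field later; others simply ignore the `None`.
        postgres_config: PostgresConfig::from_env().ok(),
    }
}

fn main() {
    std::env::set_var("DATABASE_POOL_SIZE", "50");
    let store = load_env_config();
    assert_eq!(store.postgres_config.unwrap().max_connections, 50);
}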
@@ -60,8 +68,15 @@ pub struct TempConfigStore { pub gas_adjuster_config: Option, pub observability: Option, pub snapshot_creator: Option, + pub da_dispatcher_config: Option, pub protective_reads_writer_config: Option, + pub basic_witness_input_producer_config: Option, pub core_object_store: Option, + pub base_token_adjuster_config: Option, + pub commitment_generator: Option, + pub pruning: Option, + pub snapshot_recovery: Option, + pub external_price_api_client_config: Option, } impl TempConfigStore { @@ -87,8 +102,16 @@ impl TempConfigStore { eth: self.eth_sender_config.clone(), snapshot_creator: self.snapshot_creator.clone(), observability: self.observability.clone(), + da_dispatcher_config: self.da_dispatcher_config.clone(), protective_reads_writer_config: self.protective_reads_writer_config.clone(), + basic_witness_input_producer_config: self.basic_witness_input_producer_config.clone(), core_object_store: self.core_object_store.clone(), + base_token_adjuster: self.base_token_adjuster_config.clone(), + commitment_generator: self.commitment_generator.clone(), + snapshot_recovery: self.snapshot_recovery.clone(), + pruning: self.pruning.clone(), + external_price_api_client_config: self.external_price_api_client_config.clone(), + consensus_config: None, } } @@ -122,3 +145,68 @@ impl TempConfigStore { } } } + +fn load_env_config() -> anyhow::Result { + Ok(TempConfigStore { + postgres_config: PostgresConfig::from_env().ok(), + health_check_config: HealthCheckConfig::from_env().ok(), + merkle_tree_api_config: MerkleTreeApiConfig::from_env().ok(), + web3_json_rpc_config: Web3JsonRpcConfig::from_env().ok(), + circuit_breaker_config: CircuitBreakerConfig::from_env().ok(), + mempool_config: MempoolConfig::from_env().ok(), + network_config: NetworkConfig::from_env().ok(), + contract_verifier: ContractVerifierConfig::from_env().ok(), + operations_manager_config: OperationsManagerConfig::from_env().ok(), + state_keeper_config: StateKeeperConfig::from_env().ok(), + house_keeper_config: HouseKeeperConfig::from_env().ok(), + fri_proof_compressor_config: FriProofCompressorConfig::from_env().ok(), + fri_prover_config: FriProverConfig::from_env().ok(), + fri_prover_group_config: FriProverGroupConfig::from_env().ok(), + fri_prover_gateway_config: FriProverGatewayConfig::from_env().ok(), + fri_witness_vector_generator: FriWitnessVectorGeneratorConfig::from_env().ok(), + fri_witness_generator_config: FriWitnessGeneratorConfig::from_env().ok(), + prometheus_config: PrometheusConfig::from_env().ok(), + proof_data_handler_config: ProofDataHandlerConfig::from_env().ok(), + api_config: ApiConfig::from_env().ok(), + db_config: DBConfig::from_env().ok(), + eth_sender_config: EthConfig::from_env().ok(), + eth_watch_config: EthWatchConfig::from_env().ok(), + gas_adjuster_config: GasAdjusterConfig::from_env().ok(), + observability: ObservabilityConfig::from_env().ok(), + snapshot_creator: SnapshotsCreatorConfig::from_env().ok(), + da_dispatcher_config: DADispatcherConfig::from_env().ok(), + protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(), + basic_witness_input_producer_config: BasicWitnessInputProducerConfig::from_env().ok(), + core_object_store: ObjectStoreConfig::from_env().ok(), + base_token_adjuster_config: BaseTokenAdjusterConfig::from_env().ok(), + commitment_generator: None, + pruning: None, + snapshot_recovery: None, + external_price_api_client_config: ExternalPriceApiClientConfig::from_env().ok(), + }) +} + +pub fn load_general_config(path: Option) -> anyhow::Result { + match path 
{ + Some(path) => { + let yaml = std::fs::read_to_string(path).context("Failed to read general config")?; + decode_yaml_repr::(&yaml) + } + None => Ok(load_env_config() + .context("general config from env")? + .general()), + } +} + +pub fn load_database_secrets(path: Option) -> anyhow::Result { + match path { + Some(path) => { + let yaml = std::fs::read_to_string(path).context("Failed to read secrets")?; + let secrets = decode_yaml_repr::(&yaml).context("Failed to parse secrets")?; + Ok(secrets + .database + .context("failed to parse database secrets")?) + } + None => DatabaseSecrets::from_env(), + } +} diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index b826a8b40f21..2a09ce5d176c 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_node_api_server" -version = "0.1.0" +description = "ZKsync API server" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -26,7 +27,7 @@ zksync_web3_decl = { workspace = true, features = ["server"] } zksync_utils.workspace = true zksync_protobuf.workspace = true zksync_mini_merkle_tree.workspace = true -multivm.workspace = true +zksync_multivm.workspace = true vise.workspace = true anyhow.workspace = true @@ -48,6 +49,7 @@ pin-project-lite.workspace = true hex.workspace = true http.workspace = true tower.workspace = true +strum = { workspace = true, features = ["derive"] } tower-http = { workspace = true, features = ["cors", "metrics"] } lru.workspace = true diff --git a/core/node/api_server/src/execution_sandbox/apply.rs b/core/node/api_server/src/execution_sandbox/apply.rs index 745c8a316707..99664697b14c 100644 --- a/core/node/api_server/src/execution_sandbox/apply.rs +++ b/core/node/api_server/src/execution_sandbox/apply.rs @@ -9,21 +9,21 @@ use std::time::{Duration, Instant}; use anyhow::Context as _; -use multivm::{ +use tokio::runtime::Handle; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; +use zksync_multivm::{ interface::{L1BatchEnv, L2BlockEnv, SystemEnv, VmInterface}, utils::adjust_pubdata_price_for_tx, vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryDisabled}, VmInstance, }; -use tokio::runtime::Handle; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; use zksync_state::{PostgresStorage, ReadStorage, StoragePtr, StorageView, WriteStorage}; use zksync_system_constants::{ SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ZKPORTER_IS_AVAILABLE, }; use zksync_types::{ - api, + api::{self, state_override::StateOverride}, block::{pack_block_info, unpack_block_info, L2BlockHasher}, fee_model::BatchFeeInput, get_nonce_key, @@ -34,11 +34,13 @@ use zksync_types::{ use zksync_utils::{h256_to_u256, time::seconds_since_epoch, u256_to_h256}; use super::{ + storage::StorageWithOverrides, vm_metrics::{self, SandboxStage, SANDBOX_METRICS}, BlockArgs, TxExecutionArgs, TxSharedArgs, VmPermit, }; -type BoxedVm<'a> = Box, HistoryDisabled>>; +type VmStorageView<'a> = StorageView>>; +type BoxedVm<'a> = Box>, HistoryDisabled>>; #[derive(Debug)] struct Sandbox<'a> { @@ -46,7 +48,7 @@ struct Sandbox<'a> { l1_batch_env: L1BatchEnv, execution_args: &'a TxExecutionArgs, l2_block_info_to_reset: Option, - storage_view: StorageView>, + storage_view: VmStorageView<'a>, } impl<'a> Sandbox<'a> { @@ -55,6 +57,7 @@ impl<'a> Sandbox<'a> { shared_args: TxSharedArgs, execution_args: &'a TxExecutionArgs, 
block_args: BlockArgs, + state_override: &StateOverride, ) -> anyhow::Result> { let resolve_started_at = Instant::now(); let resolved_block_info = block_args @@ -90,7 +93,8 @@ impl<'a> Sandbox<'a> { .context("cannot create `PostgresStorage`")? .with_caches(shared_args.caches.clone()); - let storage_view = StorageView::new(storage); + let storage_with_overrides = StorageWithOverrides::new(storage, state_override); + let storage_view = StorageView::new(storage_with_overrides); let (system_env, l1_batch_env) = Self::prepare_env( shared_args, execution_args, @@ -259,7 +263,7 @@ impl<'a> Sandbox<'a> { mut self, tx: &Transaction, adjust_pubdata_price: bool, - ) -> (BoxedVm<'a>, StoragePtr>>) { + ) -> (BoxedVm<'a>, StoragePtr>) { self.setup_storage_view(tx); let protocol_version = self.system_env.version; if adjust_pubdata_price { @@ -294,9 +298,10 @@ pub(super) fn apply_vm_in_sandbox( execution_args: &TxExecutionArgs, connection_pool: &ConnectionPool, tx: Transaction, - block_args: BlockArgs, + block_args: BlockArgs, // Block arguments for the transaction. + state_override: Option, apply: impl FnOnce( - &mut VmInstance, HistoryDisabled>, + &mut VmInstance>, HistoryDisabled>, Transaction, ProtocolVersionId, ) -> T, @@ -319,6 +324,7 @@ pub(super) fn apply_vm_in_sandbox( shared_args, execution_args, block_args, + state_override.as_ref().unwrap_or(&StateOverride::default()), ))?; let protocol_version = sandbox.system_env.version; let (mut vm, storage_view) = sandbox.into_vm(&tx, adjust_pubdata_price); @@ -331,6 +337,7 @@ pub(super) fn apply_vm_in_sandbox( tx.initiator_account(), tx.nonce().unwrap_or(Nonce(0)) ); + let execution_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Execution].start(); let result = apply(&mut vm, tx, protocol_version); let vm_execution_took = execution_latency.observe(); @@ -366,7 +373,7 @@ impl StoredL2BlockInfo { ); let l2_block_info = connection .storage_web3_dal() - .get_historical_value_unchecked(&l2_block_info_key, l2_block_number) + .get_historical_value_unchecked(l2_block_info_key.hashed_key(), l2_block_number) .await .context("failed reading L2 block info from VM state")?; let (l2_block_number_from_state, l2_block_timestamp) = @@ -378,7 +385,10 @@ impl StoredL2BlockInfo { ); let txs_rolling_hash = connection .storage_web3_dal() - .get_historical_value_unchecked(&l2_block_txs_rolling_hash_key, l2_block_number) + .get_historical_value_unchecked( + l2_block_txs_rolling_hash_key.hashed_key(), + l2_block_number, + ) .await .context("failed reading transaction rolling hash from VM state")?; diff --git a/core/node/api_server/src/execution_sandbox/error.rs b/core/node/api_server/src/execution_sandbox/error.rs index 9d6d635a344c..5d63d50a3c85 100644 --- a/core/node/api_server/src/execution_sandbox/error.rs +++ b/core/node/api_server/src/execution_sandbox/error.rs @@ -1,5 +1,5 @@ -use multivm::interface::{Halt, TxRevertReason}; use thiserror::Error; +use zksync_multivm::interface::{Halt, TxRevertReason}; #[derive(Debug, Error)] pub(crate) enum SandboxExecutionError { diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 9a844df28673..f633b133ab00 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -1,13 +1,13 @@ //! Implementation of "executing" methods, e.g. `eth_call`. 
use anyhow::Context as _; -use multivm::{ +use tracing::{span, Level}; +use zksync_dal::{ConnectionPool, Core}; +use zksync_multivm::{ interface::{TxExecutionMode, VmExecutionResultAndLogs, VmInterface}, tracers::StorageInvocations, MultiVMTracer, }; -use tracing::{span, Level}; -use zksync_dal::{ConnectionPool, Core}; use zksync_types::{ fee::TransactionExecutionMetrics, l2::L2Tx, transaction_request::CallOverrides, ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256, @@ -17,6 +17,7 @@ use super::{ apply, testonly::MockTransactionExecutor, vm_metrics, ApiTracer, BlockArgs, TxSharedArgs, VmPermit, }; +use crate::execution_sandbox::api::state_override::StateOverride; #[derive(Debug)] pub(crate) struct TxExecutionArgs { @@ -111,6 +112,7 @@ impl TransactionExecutor { connection_pool: ConnectionPool, tx: Transaction, block_args: BlockArgs, + state_override: Option, custom_tracers: Vec, ) -> anyhow::Result { if let Self::Mock(mock_executor) = self { @@ -129,6 +131,7 @@ impl TransactionExecutor { &connection_pool, tx, block_args, + state_override, |vm, tx, _| { let storage_invocation_tracer = StorageInvocations::new(execution_args.missed_storage_invocation_limit); @@ -170,6 +173,7 @@ impl TransactionExecutor { block_args: BlockArgs, vm_execution_cache_misses_limit: Option, custom_tracers: Vec, + state_override: Option, ) -> anyhow::Result { let execution_args = TxExecutionArgs::for_eth_call( call_overrides.enforced_base_fee, @@ -189,6 +193,7 @@ impl TransactionExecutor { connection_pool, tx.into(), block_args, + state_override, custom_tracers, ) .await?; diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index 72c6ba9789f5..f7c876679cb0 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -26,6 +26,7 @@ use super::tx_sender::MultiVMBaseSystemContracts; mod apply; mod error; mod execute; +mod storage; pub mod testonly; #[cfg(test)] mod tests; diff --git a/core/node/api_server/src/execution_sandbox/storage.rs b/core/node/api_server/src/execution_sandbox/storage.rs new file mode 100644 index 000000000000..749945b4e341 --- /dev/null +++ b/core/node/api_server/src/execution_sandbox/storage.rs @@ -0,0 +1,201 @@ +//! VM storage functionality specifically used in the VM sandbox. + +use std::{ + collections::{HashMap, HashSet}, + fmt, +}; + +use zksync_state::ReadStorage; +use zksync_types::{ + api::state_override::{OverrideState, StateOverride}, + get_code_key, get_nonce_key, + utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, + AccountTreeId, StorageKey, StorageValue, H256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +/// A storage view that allows to override some of the storage values. +#[derive(Debug)] +pub(super) struct StorageWithOverrides { + storage_handle: S, + overridden_slots: HashMap, + overridden_factory_deps: HashMap>, + overridden_accounts: HashSet, +} + +impl StorageWithOverrides { + /// Creates a new storage view based on the underlying storage. 
+ pub(super) fn new(storage: S, state_override: &StateOverride) -> Self { + let mut this = Self { + storage_handle: storage, + overridden_slots: HashMap::new(), + overridden_factory_deps: HashMap::new(), + overridden_accounts: HashSet::new(), + }; + this.apply_state_override(state_override); + this + } + + fn apply_state_override(&mut self, state_override: &StateOverride) { + for (account, overrides) in state_override.iter() { + if let Some(balance) = overrides.balance { + let balance_key = storage_key_for_eth_balance(account); + self.overridden_slots + .insert(balance_key, u256_to_h256(balance)); + } + + if let Some(nonce) = overrides.nonce { + let nonce_key = get_nonce_key(account); + let full_nonce = self.read_value(&nonce_key); + let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); + let new_full_nonce = u256_to_h256(nonces_to_full_nonce(nonce, deployment_nonce)); + self.overridden_slots.insert(nonce_key, new_full_nonce); + } + + if let Some(code) = &overrides.code { + let code_key = get_code_key(account); + let code_hash = code.hash(); + self.overridden_slots.insert(code_key, code_hash); + self.store_factory_dep(code_hash, code.clone().into_bytes()); + } + + match &overrides.state { + Some(OverrideState::State(state)) => { + let account = AccountTreeId::new(*account); + self.override_account_state_diff(account, state); + self.overridden_accounts.insert(account); + } + Some(OverrideState::StateDiff(state_diff)) => { + let account = AccountTreeId::new(*account); + self.override_account_state_diff(account, state_diff); + } + None => { /* do nothing */ } + } + } + } + + fn store_factory_dep(&mut self, hash: H256, code: Vec) { + self.overridden_factory_deps.insert(hash, code); + } + + fn override_account_state_diff( + &mut self, + account: AccountTreeId, + state_diff: &HashMap, + ) { + let account_slots = state_diff + .iter() + .map(|(&slot, &value)| (StorageKey::new(account, slot), value)); + self.overridden_slots.extend(account_slots); + } +} + +impl ReadStorage for StorageWithOverrides { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + if let Some(value) = self.overridden_slots.get(key) { + return *value; + } + if self.overridden_accounts.contains(key.account()) { + return H256::zero(); + } + self.storage_handle.read_value(key) + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.storage_handle.is_write_initial(key) + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.overridden_factory_deps + .get(&hash) + .cloned() + .or_else(|| self.storage_handle.load_factory_dep(hash)) + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + self.storage_handle.get_enumeration_index(key) + } +} + +#[cfg(test)] +mod tests { + use zksync_state::InMemoryStorage; + use zksync_types::{ + api::state_override::{Bytecode, OverrideAccount}, + Address, + }; + + use super::*; + + #[test] + fn override_basics() { + let overrides = StateOverride::new(HashMap::from([ + ( + Address::repeat_byte(1), + OverrideAccount { + balance: Some(1.into()), + ..OverrideAccount::default() + }, + ), + ( + Address::repeat_byte(2), + OverrideAccount { + nonce: Some(2.into()), + ..OverrideAccount::default() + }, + ), + ( + Address::repeat_byte(3), + OverrideAccount { + code: Some(Bytecode::new((0..32).collect()).unwrap()), + ..OverrideAccount::default() + }, + ), + ( + Address::repeat_byte(4), + OverrideAccount { + state: Some(OverrideState::StateDiff(HashMap::from([( + H256::zero(), + H256::repeat_byte(1), + )]))), + 
..OverrideAccount::default() + }, + ), + ( + Address::repeat_byte(5), + OverrideAccount { + state: Some(OverrideState::State(HashMap::new())), + ..OverrideAccount::default() + }, + ), + ])); + + let mut storage = InMemoryStorage::default(); + let overridden_key = + StorageKey::new(AccountTreeId::new(Address::repeat_byte(4)), H256::zero()); + storage.set_value(overridden_key, H256::repeat_byte(0xff)); + let retained_key = StorageKey::new( + AccountTreeId::new(Address::repeat_byte(4)), + H256::from_low_u64_be(1), + ); + storage.set_value(retained_key, H256::repeat_byte(0xfe)); + let erased_key = StorageKey::new(AccountTreeId::new(Address::repeat_byte(5)), H256::zero()); + storage.set_value(erased_key, H256::repeat_byte(1)); + let mut storage = StorageWithOverrides::new(storage, &overrides); + + let balance = storage.read_value(&storage_key_for_eth_balance(&Address::repeat_byte(1))); + assert_eq!(balance, H256::from_low_u64_be(1)); + let nonce = storage.read_value(&get_nonce_key(&Address::repeat_byte(2))); + assert_eq!(nonce, H256::from_low_u64_be(2)); + let code_hash = storage.read_value(&get_code_key(&Address::repeat_byte(3))); + assert_ne!(code_hash, H256::zero()); + assert!(storage.load_factory_dep(code_hash).is_some()); + + let overridden_value = storage.read_value(&overridden_key); + assert_eq!(overridden_value, H256::repeat_byte(1)); + let retained_value = storage.read_value(&retained_key); + assert_eq!(retained_value, H256::repeat_byte(0xfe)); + let erased_value = storage.read_value(&erased_key); + assert_eq!(erased_value, H256::zero()); + } +} diff --git a/core/node/api_server/src/execution_sandbox/testonly.rs b/core/node/api_server/src/execution_sandbox/testonly.rs index f027acc6d625..673c30b9f17e 100644 --- a/core/node/api_server/src/execution_sandbox/testonly.rs +++ b/core/node/api_server/src/execution_sandbox/testonly.rs @@ -1,6 +1,6 @@ use std::fmt; -use multivm::interface::{ExecutionResult, VmExecutionResultAndLogs}; +use zksync_multivm::interface::{ExecutionResult, VmExecutionResultAndLogs}; use zksync_types::{ fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, Transaction, }; diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index e479066cacc2..0a8af35597b3 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -195,6 +195,7 @@ async fn test_instantiating_vm(pool: ConnectionPool, block_args: BlockArgs &pool, transaction.clone(), block_args, + None, |_, received_tx, _| { assert_eq!(received_tx, transaction); }, diff --git a/core/node/api_server/src/execution_sandbox/tracers.rs b/core/node/api_server/src/execution_sandbox/tracers.rs index 2b969e380dd2..ba258ab7c74a 100644 --- a/core/node/api_server/src/execution_sandbox/tracers.rs +++ b/core/node/api_server/src/execution_sandbox/tracers.rs @@ -1,7 +1,9 @@ use std::sync::Arc; -use multivm::{tracers::CallTracer, vm_latest::HistoryMode, MultiVMTracer, MultiVmTracerPointer}; use once_cell::sync::OnceCell; +use zksync_multivm::{ + tracers::CallTracer, vm_latest::HistoryMode, MultiVMTracer, MultiVmTracerPointer, +}; use zksync_state::WriteStorage; use zksync_types::vm_trace::Call; @@ -14,7 +16,7 @@ pub(crate) enum ApiTracer { impl ApiTracer { pub fn into_boxed< S: WriteStorage, - H: HistoryMode + multivm::HistoryMode + 'static, + H: HistoryMode + zksync_multivm::HistoryMode + 'static, >( self, ) -> MultiVmTracerPointer { diff --git 
diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs
index 0356ac74c5c1..5e958cada66e 100644
--- a/core/node/api_server/src/execution_sandbox/validate.rs
+++ b/core/node/api_server/src/execution_sandbox/validate.rs
@@ -1,7 +1,8 @@
 use std::collections::HashSet;
 
 use anyhow::Context as _;
-use multivm::{
+use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
+use zksync_multivm::{
     interface::{ExecutionResult, VmExecutionMode, VmInterface},
     tracers::{
         validator::{self, ValidationTracer, ValidationTracerParams},
@@ -10,7 +11,6 @@ use multivm::{
     vm_latest::HistoryDisabled,
     MultiVMTracer,
 };
-use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
 use zksync_types::{l2::L2Tx, Address, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS};
 
 use super::{
@@ -72,6 +72,7 @@ impl TransactionExecutor {
             &connection_pool,
             tx,
             block_args,
+            None,
             |vm, tx, protocol_version| {
                 let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start();
                 let span = tracing::debug_span!("validation").entered();
diff --git a/core/node/api_server/src/execution_sandbox/vm_metrics.rs b/core/node/api_server/src/execution_sandbox/vm_metrics.rs
index 33100169e392..e1e96d8eee5e 100644
--- a/core/node/api_server/src/execution_sandbox/vm_metrics.rs
+++ b/core/node/api_server/src/execution_sandbox/vm_metrics.rs
@@ -1,9 +1,9 @@
 use std::time::Duration;
 
-use multivm::interface::{VmExecutionResultAndLogs, VmMemoryMetrics};
 use vise::{
     Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LatencyObserver, Metrics,
 };
+use zksync_multivm::interface::{VmExecutionResultAndLogs, VmMemoryMetrics};
 use zksync_shared_metrics::InteractionType;
 use zksync_state::StorageViewMetrics;
 use zksync_types::{
diff --git a/core/node/api_server/src/healthcheck.rs b/core/node/api_server/src/healthcheck.rs
index 4e880b57a198..414c2dbc21e9 100644
--- a/core/node/api_server/src/healthcheck.rs
+++ b/core/node/api_server/src/healthcheck.rs
@@ -25,12 +25,14 @@ async fn run_server(
         "Starting healthcheck server with checks {app_health_check:?} on {bind_address}"
     );
 
+    app_health_check.expose_metrics();
     let app = Router::new()
         .route("/health", get(check_health))
        .with_state(app_health_check);
-
-    axum::Server::bind(bind_address)
-        .serve(app.into_make_service())
+    let listener = tokio::net::TcpListener::bind(bind_address)
+        .await
+        .unwrap_or_else(|err| panic!("Failed binding healthcheck server to {bind_address}: {err}"));
+    axum::serve(listener, app)
         .with_graceful_shutdown(async move {
             if stop_receiver.changed().await.is_err() {
                 tracing::warn!("Stop signal sender for healthcheck server was dropped without sending a signal");
diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs
index a6bbbf9ffa04..38939937fcda 100644
--- a/core/node/api_server/src/tx_sender/mod.rs
+++ b/core/node/api_server/src/tx_sender/mod.rs
@@ -3,7 +3,13 @@
 use std::{sync::Arc, time::Instant};
 
 use anyhow::Context as _;
-use multivm::{
+use tokio::sync::RwLock;
+use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig};
+use zksync_contracts::BaseSystemContracts;
+use zksync_dal::{
+    transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal,
+};
+use zksync_multivm::{
     interface::VmExecutionResultAndLogs,
     utils::{
         adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead,
@@ -11,12 +17,6 @@ use multivm::{
     },
     vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
 };
-use tokio::sync::RwLock;
-use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig};
-use zksync_contracts::BaseSystemContracts;
-use zksync_dal::{
-    transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal,
-};
 use zksync_node_fee_model::{ApiFeeInputProvider, BatchFeeModelInputProvider};
 use zksync_state::PostgresStorageCaches;
 use zksync_state_keeper::{
@@ -24,6 +24,7 @@ use zksync_state_keeper::{
     SequencerSealer,
 };
 use zksync_types::{
+    api::state_override::StateOverride,
     fee::{Fee, TransactionExecutionMetrics},
     fee_model::BatchFeeInput,
     get_code_key, get_intrinsic_constants,
@@ -385,6 +386,7 @@ impl TxSender {
                 self.0.replica_connection_pool.clone(),
                 tx.clone().into(),
                 block_args,
+                None,
                 vec![],
             )
             .await?;
@@ -656,6 +658,7 @@ impl TxSender {
         block_args: BlockArgs,
         base_fee: u64,
         vm_version: VmVersion,
+        state_override: Option<StateOverride>,
     ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> {
         let gas_limit_with_overhead = tx_gas_limit
             + derive_overhead(
@@ -703,6 +706,7 @@ impl TxSender {
                 self.0.replica_connection_pool.clone(),
                 tx.clone(),
                 block_args,
+                state_override,
                 vec![],
             )
             .await?;
@@ -733,6 +737,7 @@ impl TxSender {
         mut tx: Transaction,
         estimated_fee_scale_factor: f64,
         acceptable_overestimation: u64,
+        state_override: Option<StateOverride>,
     ) -> Result<Fee, SubmitTxError> {
         let estimation_started_at = Instant::now();
@@ -786,17 +791,25 @@ impl TxSender {
             )
         })?;
 
-        if !tx.is_l1()
-            && account_code_hash == H256::zero()
-            && tx.execute.value > self.get_balance(&tx.initiator_account()).await?
-        {
-            tracing::info!(
-                "fee estimation failed on validation step.
-                account: {} does not have enough funds for for transferring tx.value: {}.",
-                &tx.initiator_account(),
-                tx.execute.value
-            );
-            return Err(SubmitTxError::InsufficientFundsForTransfer);
+        if !tx.is_l1() && account_code_hash == H256::zero() {
+            let balance = match state_override
+                .as_ref()
+                .and_then(|overrides| overrides.get(&tx.initiator_account()))
+                .and_then(|account| account.balance)
+            {
+                Some(balance) => balance,
+                None => self.get_balance(&tx.initiator_account()).await?,
+            };
+
+            if tx.execute.value > balance {
+                tracing::info!(
+                    "fee estimation failed on validation step.
+                    account: {} does not have enough funds for transferring tx.value: {}.",
+                    tx.initiator_account(),
+                    tx.execute.value
+                );
+                return Err(SubmitTxError::InsufficientFundsForTransfer);
+            }
         }
 
         // For L2 transactions we need a properly formatted signature
@@ -836,6 +849,7 @@ impl TxSender {
                 block_args,
                 base_fee,
                 protocol_version.into(),
+                state_override.clone(),
             )
             .await
             .context("estimate_gas step failed")?;
@@ -871,6 +885,7 @@ impl TxSender {
                 block_args,
                 base_fee,
                 protocol_version.into(),
+                state_override.clone(),
             )
             .await
             .context("estimate_gas step failed")?;
@@ -903,6 +918,7 @@ impl TxSender {
                 block_args,
                 base_fee,
                 protocol_version.into(),
+                state_override,
             )
             .await
             .context("final estimate_gas step failed")?;
@@ -973,6 +989,7 @@ impl TxSender {
         block_args: BlockArgs,
         call_overrides: CallOverrides,
         tx: L2Tx,
+        state_override: Option<StateOverride>,
     ) -> Result<Vec<u8>, SubmitTxError> {
         let vm_permit = self.0.vm_concurrency_limiter.acquire().await;
         let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?;
@@ -989,6 +1006,7 @@ impl TxSender {
                 block_args,
                 vm_execution_cache_misses_limit,
                 vec![],
+                state_override,
             )
             .await?
             .into_api_call_result()
diff --git a/core/node/api_server/src/tx_sender/result.rs b/core/node/api_server/src/tx_sender/result.rs
index a003b640525d..f4bda54efc65 100644
--- a/core/node/api_server/src/tx_sender/result.rs
+++ b/core/node/api_server/src/tx_sender/result.rs
@@ -1,5 +1,5 @@
-use multivm::interface::{ExecutionResult, VmExecutionResultAndLogs};
 use thiserror::Error;
+use zksync_multivm::interface::{ExecutionResult, VmExecutionResultAndLogs};
 use zksync_types::{l2::error::TxCheckError, U256};
 use zksync_web3_decl::error::EnrichedClientError;
 
diff --git a/core/node/api_server/src/tx_sender/tests.rs b/core/node/api_server/src/tx_sender/tests.rs
index 154e94280f36..06b6b7a1301b 100644
--- a/core/node/api_server/src/tx_sender/tests.rs
+++ b/core/node/api_server/src/tx_sender/tests.rs
@@ -1,7 +1,7 @@
 //! Tests for the transaction sender.
 
 use assert_matches::assert_matches;
-use multivm::interface::ExecutionResult;
+use zksync_multivm::interface::ExecutionResult;
 use zksync_node_fee_model::MockBatchFeeParamsProvider;
 use zksync_node_genesis::{insert_genesis_batch, GenesisParams};
 use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot};
diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/metadata.rs b/core/node/api_server/src/web3/backend_jsonrpsee/metadata.rs
index d5b8d90fdf98..b703033c1eee 100644
--- a/core/node/api_server/src/web3/backend_jsonrpsee/metadata.rs
+++ b/core/node/api_server/src/web3/backend_jsonrpsee/metadata.rs
@@ -4,10 +4,7 @@ use std::{cell::RefCell, mem, sync::Arc, time::Instant};
 
 use thread_local::ThreadLocal;
 use zksync_types::api;
-use zksync_web3_decl::{
-    error::Web3Error,
-    jsonrpsee::{helpers::MethodResponseResult, MethodResponse},
-};
+use zksync_web3_decl::{error::Web3Error, jsonrpsee::MethodResponse};
 
 #[cfg(test)]
 use super::testonly::RecordedMethodCalls;
@@ -154,11 +151,11 @@ impl MethodCall<'_> {
         self.is_completed = true;
         let meta = &self.meta;
         let params = &self.params;
-        match response.success_or_error {
-            MethodResponseResult::Success => {
-                API_METRICS.observe_response_size(meta.name, params, response.result.len());
+        match response.as_error_code() {
+            None => {
+                API_METRICS.observe_response_size(meta.name, params, response.as_result().len());
             }
-            MethodResponseResult::Failed(error_code) => {
+            Some(error_code) => {
                 API_METRICS.observe_protocol_error(
                     meta.name,
                     params,
diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/middleware.rs b/core/node/api_server/src/web3/backend_jsonrpsee/middleware.rs
index 5c25b0ebc3ca..564adf01d82c 100644
--- a/core/node/api_server/src/web3/backend_jsonrpsee/middleware.rs
+++ b/core/node/api_server/src/web3/backend_jsonrpsee/middleware.rs
@@ -337,7 +337,7 @@ mod tests {
     use rand::{thread_rng, Rng};
     use test_casing::{test_casing, Product};
     use zksync_types::api;
-    use zksync_web3_decl::jsonrpsee::helpers::MethodResponseResult;
+    use zksync_web3_decl::jsonrpsee::{types::Id, ResponsePayload};
 
     use super::*;
 
@@ -366,11 +366,11 @@ mod tests {
             }
         }
 
-        MethodResponse {
-            result: "{}".to_string(),
-            success_or_error: MethodResponseResult::Success,
-            is_subscription: false,
-        }
+        MethodResponse::response(
+            Id::Number(1),
+            ResponsePayload::success("{}".to_string()),
+            usize::MAX,
+        )
     };
 
     WithMethodCall::new(
@@ -394,7 +394,7 @@ mod tests {
         assert_eq!(call.metadata.name, "test");
         assert!(call.metadata.block_id.is_some());
         assert_eq!(call.metadata.block_diff, Some(9));
-        assert!(call.response.is_success());
+        assert!(call.error_code.is_none());
     }
 }
diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs
index c4a16b132421..ff8ce0356a05 100644
--- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs
+++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs
@@ -1,7 +1,7 @@
 use zksync_types::{
     api::{
-        Block, BlockId, BlockIdVariant, BlockNumber, Log, Transaction, TransactionId,
-        TransactionReceipt, TransactionVariant,
+        state_override::StateOverride, Block, BlockId, BlockIdVariant, BlockNumber, Log,
+        Transaction, TransactionId, TransactionReceipt, TransactionVariant,
     },
     transaction_request::CallRequest,
     web3::{Bytes, FeeHistory, Index, SyncState},
@@ -27,14 +27,24 @@ impl EthNamespaceServer for EthNamespace {
         Ok(self.chain_id_impl())
     }
 
-    async fn call(&self, req: CallRequest, block: Option<BlockIdVariant>) -> RpcResult<Bytes> {
-        self.call_impl(req, block.map(Into::into))
+    async fn call(
+        &self,
+        req: CallRequest,
+        block: Option<BlockIdVariant>,
+        state_override: Option<StateOverride>,
+    ) -> RpcResult<Bytes> {
+        self.call_impl(req, block.map(Into::into), state_override)
             .await
             .map_err(|err| self.current_method().map_err(err))
     }
 
-    async fn estimate_gas(&self, req: CallRequest, block: Option<BlockNumber>) -> RpcResult<U256> {
-        self.estimate_gas_impl(req, block)
+    async fn estimate_gas(
+        &self,
+        req: CallRequest,
+        block: Option<BlockNumber>,
+        state_override: Option<StateOverride>,
+    ) -> RpcResult<U256> {
+        self.estimate_gas_impl(req, block, state_override)
             .await
             .map_err(|err| self.current_method().map_err(err))
     }
diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/mod.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/mod.rs
index 3f0f043f8d4a..1d00e90b0e84 100644
--- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/mod.rs
+++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/mod.rs
@@ -3,5 +3,6 @@ pub mod en;
 pub mod eth;
 pub mod net;
 pub mod snapshots;
+pub mod unstable;
 pub mod web3;
 pub mod zks;
diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs
new file mode 100644
index 000000000000..6abaa718a050
--- /dev/null
+++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs
@@ -0,0 +1,19 @@
+use zksync_types::{api::TransactionExecutionInfo, H256};
+use zksync_web3_decl::{
+    jsonrpsee::core::{async_trait, RpcResult},
+    namespaces::UnstableNamespaceServer,
+};
+
+use crate::web3::namespaces::UnstableNamespace;
+
+#[async_trait]
+impl UnstableNamespaceServer for UnstableNamespace {
+    async fn transaction_execution_info(
+        &self,
+        hash: H256,
+    ) -> RpcResult<Option<TransactionExecutionInfo>> {
+        self.transaction_execution_info_impl(hash)
+            .await
+            .map_err(|err| self.current_method().map_err(err))
+    }
+}
diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs
index 45cb312dde6e..16bbde13509f 100644
--- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs
+++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs
@@ -3,8 +3,9 @@ use std::collections::HashMap;
 use itertools::Itertools;
 use zksync_types::{
     api::{
-        ApiStorageLog, BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, Log, Proof,
-        ProtocolVersion, TransactionDetailedResult, TransactionDetails,
+        state_override::StateOverride, ApiStorageLog, BlockDetails, BridgeAddresses,
+        L1BatchDetails, L2ToL1LogProof, Log, Proof, ProtocolVersion, TransactionDetailedResult,
+        TransactionDetails,
     },
     fee::Fee,
     fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput},
@@ -22,14 +23,22 @@ use crate::web3::ZksNamespace;
 
 #[async_trait]
 impl ZksNamespaceServer for ZksNamespace {
-    async fn estimate_fee(&self, req: CallRequest) -> RpcResult<Fee> {
-        self.estimate_fee_impl(req)
+    async fn estimate_fee(
+        &self,
+        req: CallRequest,
+        state_override: Option<StateOverride>,
+    ) -> RpcResult<Fee> {
+        self.estimate_fee_impl(req, state_override)
             .await
             .map_err(|err| self.current_method().map_err(err))
     }
 
-    async fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> RpcResult<U256> {
-        self.estimate_l1_to_l2_gas_impl(req)
+    async fn estimate_gas_l1_to_l2(
+        &self,
+        req: CallRequest,
+        state_override: Option<StateOverride>,
+    ) -> RpcResult<U256> {
+        self.estimate_l1_to_l2_gas_impl(req, state_override)
             .await
             .map_err(|err| self.current_method().map_err(err))
     }
diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/testonly.rs b/core/node/api_server/src/web3/backend_jsonrpsee/testonly.rs
index 98d6bf2440e4..79f5009eb978 100644
--- a/core/node/api_server/src/web3/backend_jsonrpsee/testonly.rs
+++ b/core/node/api_server/src/web3/backend_jsonrpsee/testonly.rs
@@ -2,14 +2,14 @@
 
 use std::{mem, sync::Mutex};
 
-use zksync_web3_decl::jsonrpsee::{helpers::MethodResponseResult, MethodResponse};
+use zksync_web3_decl::jsonrpsee::MethodResponse;
 
 use super::metadata::MethodMetadata;
 
 #[derive(Debug, Clone)]
 pub(crate) struct RecordedCall {
     pub metadata: MethodMetadata,
-    pub response: MethodResponseResult,
+    pub error_code: Option<i32>,
 }
 
 /// Test-only JSON-RPC recorder of all calls passing through `MetadataMiddleware`.
@@ -24,7 +24,7 @@ impl RecordedMethodCalls {
             .expect("recorded calls are poisoned")
             .push(RecordedCall {
                 metadata: metadata.clone(),
-                response: response.success_or_error,
+                error_code: response.as_error_code(),
             });
     }
diff --git a/core/node/api_server/src/web3/metrics.rs b/core/node/api_server/src/web3/metrics.rs
index af6e1bf63ad8..9d8cbf813b03 100644
--- a/core/node/api_server/src/web3/metrics.rs
+++ b/core/node/api_server/src/web3/metrics.rs
@@ -102,6 +102,7 @@ enum BlockIdLabel {
     Committed,
     Finalized,
     Latest,
+    L1Committed,
     Earliest,
     Pending,
     Number,
@@ -139,6 +140,7 @@ impl From<&MethodMetadata> for MethodLabels {
             api::BlockId::Number(api::BlockNumber::Committed) => BlockIdLabel::Committed,
             api::BlockId::Number(api::BlockNumber::Finalized) => BlockIdLabel::Finalized,
             api::BlockId::Number(api::BlockNumber::Latest) => BlockIdLabel::Latest,
+            api::BlockId::Number(api::BlockNumber::L1Committed) => BlockIdLabel::L1Committed,
             api::BlockId::Number(api::BlockNumber::Earliest) => BlockIdLabel::Earliest,
             api::BlockId::Number(api::BlockNumber::Pending) => BlockIdLabel::Pending,
         });
diff --git a/core/node/api_server/src/web3/mod.rs b/core/node/api_server/src/web3/mod.rs
index b86666ea6868..bad1b493a5fd 100644
--- a/core/node/api_server/src/web3/mod.rs
+++ b/core/node/api_server/src/web3/mod.rs
@@ -24,7 +24,8 @@ use zksync_web3_decl::{
     },
     namespaces::{
         DebugNamespaceServer, EnNamespaceServer, EthNamespaceServer, EthPubSubServer,
-        NetNamespaceServer, SnapshotsNamespaceServer, Web3NamespaceServer, ZksNamespaceServer,
+        NetNamespaceServer, SnapshotsNamespaceServer, UnstableNamespaceServer, Web3NamespaceServer,
+        ZksNamespaceServer,
     },
     types::Filter,
 };
@@ -37,8 +38,8 @@ use self::{
     mempool_cache::MempoolCache,
     metrics::API_METRICS,
     namespaces::{
-        DebugNamespace, EnNamespace, EthNamespace, NetNamespace, SnapshotsNamespace, Web3Namespace,
-        ZksNamespace,
+        DebugNamespace, EnNamespace, EthNamespace, NetNamespace, SnapshotsNamespace,
+        UnstableNamespace, Web3Namespace, ZksNamespace,
     },
     pubsub::{EthSubscribe, EthSubscriptionIdProvider, PubSubEvent},
     state::{Filters, InternalApiConfig, RpcState, SealedL2BlockNumber},
@@ -86,8 +87,9 @@ enum ApiTransport {
     Http(SocketAddr),
 }
 
-#[derive(Debug, Deserialize, Clone, PartialEq)]
+#[derive(Debug, Deserialize, Clone, PartialEq, strum::EnumString)]
 #[serde(rename_all = "lowercase")]
+#[strum(serialize_all = "lowercase")]
 pub enum Namespace {
     Eth,
     Net,
@@ -97,6 +99,7 @@ pub enum Namespace {
     En,
     Pubsub,
     Snapshots,
+    Unstable,
 }
 
 impl Namespace {
@@ -406,9 +409,13 @@ impl ApiServer {
                 .context("cannot merge en namespace")?;
         }
         if namespaces.contains(&Namespace::Snapshots) {
-            rpc.merge(SnapshotsNamespace::new(rpc_state).into_rpc())
+            rpc.merge(SnapshotsNamespace::new(rpc_state.clone()).into_rpc())
                 .context("cannot merge snapshots namespace")?;
         }
+        if namespaces.contains(&Namespace::Unstable) {
+            rpc.merge(UnstableNamespace::new(rpc_state).into_rpc())
+                .context("cannot merge unstable namespace")?;
+        }
         Ok(rpc)
     }
@@ -539,8 +546,8 @@ impl ApiServer {
                         "Overriding max response size to {limit}B for sync method `{method_name}`"
                     );
                     let sync_method = sync_method.clone();
-                    MethodCallback::Sync(Arc::new(move |id, params, _max_response_size| {
-                        sync_method(id, params, limit)
+                    MethodCallback::Sync(Arc::new(move |id, params, _max_response_size, ext| {
+                        sync_method(id, params, limit, ext)
                     }))
                 }
                 (MethodCallback::Async(async_method), Some(limit)) => {
                     tracing::info!(
                         "Overriding max response size to {limit}B for async method `{method_name}`"
                     );
                     let async_method = async_method.clone();
                     MethodCallback::Async(Arc::new(
-                        move |id, params, connection_id, _max_response_size| {
-                            async_method(id, params, connection_id, limit)
+                        move |id, params, connection_id, _max_response_size, ext| {
+                            async_method(id, params, connection_id, limit, ext)
                         },
                     ))
                 }
@@ -560,8 +567,8 @@ impl ApiServer {
                    );
                     let unsub_method = unsub_method.clone();
                     MethodCallback::Unsubscription(Arc::new(
-                        move |id, params, connection_id, _max_response_size| {
-                            unsub_method(id, params, connection_id, limit)
+                        move |id, params, connection_id, _max_response_size, ext| {
+                            unsub_method(id, params, connection_id, limit, ext)
                         },
                     ))
                 }
diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs
index 35bc2e22bc3d..2f2d1d44cba1 100644
--- a/core/node/api_server/src/web3/namespaces/debug.rs
+++ b/core/node/api_server/src/web3/namespaces/debug.rs
@@ -1,9 +1,11 @@
 use std::sync::Arc;
 
 use anyhow::Context as _;
-use multivm::{interface::ExecutionResult, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT};
 use once_cell::sync::OnceCell;
 use zksync_dal::{CoreDal, DalError};
+use zksync_multivm::{
+    interface::ExecutionResult, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
+};
 use zksync_system_constants::MAX_ENCODED_TX_SIZE;
 use zksync_types::{
     api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig},
@@ -195,6 +197,7 @@ impl DebugNamespace {
                 block_args,
                 self.sender_config().vm_execution_cache_misses_limit,
                 custom_tracers,
+                None,
             )
             .await?;
diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs
index 397ce77c050f..68030763fd60 100644
--- a/core/node/api_server/src/web3/namespaces/eth.rs
+++ b/core/node/api_server/src/web3/namespaces/eth.rs
@@ -3,8 +3,8 @@ use zksync_dal::{CoreDal, DalError};
 use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE;
 use zksync_types::{
     api::{
-        BlockId, BlockNumber, GetLogsFilter, Transaction, TransactionId, TransactionReceipt,
-        TransactionVariant,
+        state_override::StateOverride, BlockId, BlockNumber, GetLogsFilter, Transaction,
+        TransactionId, TransactionReceipt, TransactionVariant,
     },
     l2::{L2Tx, TransactionType},
     transaction_request::CallRequest,
@@ -55,6 +55,7 @@ impl EthNamespace {
         &self,
         mut request: CallRequest,
         block_id: Option<BlockId>,
+        state_override: Option<StateOverride>,
     ) -> Result<Bytes, Web3Error> {
         let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending));
         self.current_method().set_block_id(block_id);
@@ -88,7 +89,7 @@ impl EthNamespace {
         let call_result: Vec<u8> = self
             .state
             .tx_sender
-            .eth_call(block_args, call_overrides, tx)
+            .eth_call(block_args, call_overrides, tx, state_override)
             .await?;
         Ok(call_result.into())
     }
@@ -97,6 +98,7 @@ impl EthNamespace {
         &self,
         request: CallRequest,
         _block: Option<BlockNumber>,
+        state_override: Option<StateOverride>,
     ) -> Result<U256, Web3Error> {
         let mut request_with_gas_per_pubdata_overridden = request;
         self.state
@@ -138,7 +140,12 @@ impl EthNamespace {
         let fee = self
             .state
             .tx_sender
-            .get_txs_fee_in_wei(tx.into(), scale_factor, acceptable_overestimation as u64)
+            .get_txs_fee_in_wei(
+                tx.into(),
+                scale_factor,
+                acceptable_overestimation as u64,
+                state_override,
+            )
             .await?;
         Ok(fee.gas_limit)
     }
@@ -407,7 +414,7 @@ impl EthNamespace {
         self.set_block_diff(block_number);
         let value = connection
             .storage_web3_dal()
-            .get_historical_value_unchecked(&storage_key, block_number)
+            .get_historical_value_unchecked(storage_key.hashed_key(), block_number)
             .await
             .map_err(DalError::generalize)?;
         Ok(value)
@@ -688,6 +695,10 @@ impl EthNamespace {
             base_fee_per_gas.len()
         ]);
 
+        // We do not support EIP-4844, but per API specification we should return 0 for pre EIP-4844 blocks.
+        let base_fee_per_blob_gas = vec![U256::zero(); base_fee_per_gas.len()];
+        let blob_gas_used_ratio = vec![0.0; base_fee_per_gas.len()];
+
         // `base_fee_per_gas` for next L2 block cannot be calculated, appending last fee as a placeholder.
         base_fee_per_gas.push(*base_fee_per_gas.last().unwrap());
 
         Ok(FeeHistory {
@@ -695,6 +706,8 @@ impl EthNamespace {
             base_fee_per_gas,
             gas_used_ratio,
             reward,
+            base_fee_per_blob_gas,
+            blob_gas_used_ratio,
         })
     }
diff --git a/core/node/api_server/src/web3/namespaces/mod.rs b/core/node/api_server/src/web3/namespaces/mod.rs
index b9355f7181ff..bf35cac0409e 100644
--- a/core/node/api_server/src/web3/namespaces/mod.rs
+++ b/core/node/api_server/src/web3/namespaces/mod.rs
@@ -6,10 +6,12 @@ mod en;
 pub(crate) mod eth;
 mod net;
 mod snapshots;
+mod unstable;
 mod web3;
 mod zks;
 
 pub(super) use self::{
     debug::DebugNamespace, en::EnNamespace, eth::EthNamespace, net::NetNamespace,
-    snapshots::SnapshotsNamespace, web3::Web3Namespace, zks::ZksNamespace,
+    snapshots::SnapshotsNamespace, unstable::UnstableNamespace, web3::Web3Namespace,
+    zks::ZksNamespace,
 };
diff --git a/core/node/api_server/src/web3/namespaces/unstable.rs b/core/node/api_server/src/web3/namespaces/unstable.rs
new file mode 100644
index 000000000000..b46ecd6dc530
--- /dev/null
+++ b/core/node/api_server/src/web3/namespaces/unstable.rs
@@ -0,0 +1,33 @@
+use zksync_dal::{CoreDal, DalError};
+use zksync_types::api::TransactionExecutionInfo;
+use zksync_web3_decl::{error::Web3Error, types::H256};
+
+use crate::web3::{backend_jsonrpsee::MethodTracer, RpcState};
+
+#[derive(Debug)]
+pub(crate) struct UnstableNamespace {
+    state: RpcState,
+}
+
+impl UnstableNamespace {
+    pub fn new(state: RpcState) -> Self {
+        Self { state }
+    }
+
+    pub(crate) fn current_method(&self) -> &MethodTracer {
+        &self.state.current_method
+    }
+
+    pub async fn transaction_execution_info_impl(
+        &self,
+        hash: H256,
+    ) -> Result<Option<TransactionExecutionInfo>, Web3Error> {
+        let mut storage = self.state.acquire_connection().await?;
+        Ok(storage
+            .transactions_web3_dal()
+            .get_unstable_transaction_execution_info(hash)
+            .await
+            .map_err(DalError::generalize)?
+            .map(|execution_info| TransactionExecutionInfo { execution_info }))
+    }
+}
diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs
index 6b872bcf637e..4f88eb17e231 100644
--- a/core/node/api_server/src/web3/namespaces/zks.rs
+++ b/core/node/api_server/src/web3/namespaces/zks.rs
@@ -1,15 +1,15 @@
 use std::{collections::HashMap, convert::TryInto};
 
 use anyhow::Context as _;
-use multivm::interface::VmExecutionResultAndLogs;
 use zksync_dal::{Connection, Core, CoreDal, DalError};
 use zksync_metadata_calculator::api_server::TreeApiError;
 use zksync_mini_merkle_tree::MiniMerkleTree;
+use zksync_multivm::interface::VmExecutionResultAndLogs;
 use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE;
 use zksync_types::{
     api::{
-        BlockDetails, BridgeAddresses, GetLogsFilter, L1BatchDetails, L2ToL1LogProof, Proof,
-        ProtocolVersion, StorageProof, TransactionDetails,
+        state_override::StateOverride, BlockDetails, BridgeAddresses, GetLogsFilter,
+        L1BatchDetails, L2ToL1LogProof, Proof, ProtocolVersion, StorageProof, TransactionDetails,
     },
     fee::Fee,
     fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput},
@@ -48,7 +48,11 @@ impl ZksNamespace {
         &self.state.current_method
     }
 
-    pub async fn estimate_fee_impl(&self, request: CallRequest) -> Result<Fee, Web3Error> {
+    pub async fn estimate_fee_impl(
+        &self,
+        request: CallRequest,
+        state_override: Option<StateOverride>,
+    ) -> Result<Fee, Web3Error> {
         let mut request_with_gas_per_pubdata_overridden = request;
         self.state
             .set_nonce_for_call_request(&mut request_with_gas_per_pubdata_overridden)
@@ -67,12 +71,13 @@ impl ZksNamespace {
         // not consider provided ones.
         tx.common_data.fee.max_priority_fee_per_gas = 0u64.into();
         tx.common_data.fee.gas_per_pubdata_limit = U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE);
-        self.estimate_fee(tx.into()).await
+        self.estimate_fee(tx.into(), state_override).await
     }
 
     pub async fn estimate_l1_to_l2_gas_impl(
         &self,
         request: CallRequest,
+        state_override: Option<StateOverride>,
     ) -> Result<U256, Web3Error> {
         let mut request_with_gas_per_pubdata_overridden = request;
         // When we're estimating fee, we are trying to deduce values related to fee, so we should
@@ -87,11 +92,15 @@ impl ZksNamespace {
             .try_into()
             .map_err(Web3Error::SerializationError)?;
 
-        let fee = self.estimate_fee(tx.into()).await?;
+        let fee = self.estimate_fee(tx.into(), state_override).await?;
         Ok(fee.gas_limit)
     }
 
-    async fn estimate_fee(&self, tx: Transaction) -> Result<Fee, Web3Error> {
+    async fn estimate_fee(
+        &self,
+        tx: Transaction,
+        state_override: Option<StateOverride>,
+    ) -> Result<Fee, Web3Error> {
         let scale_factor = self.state.api_config.estimate_gas_scale_factor;
         let acceptable_overestimation =
             self.state.api_config.estimate_gas_acceptable_overestimation;
@@ -99,7 +108,12 @@ impl ZksNamespace {
         Ok(self
             .state
             .tx_sender
-            .get_txs_fee_in_wei(tx, scale_factor, acceptable_overestimation as u64)
+            .get_txs_fee_in_wei(
+                tx,
+                scale_factor,
+                acceptable_overestimation as u64,
+                state_override,
+            )
             .await?)
     }
diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs
index 276e0b6755e7..b0db480b2fa9 100644
--- a/core/node/api_server/src/web3/state.rs
+++ b/core/node/api_server/src/web3/state.rs
@@ -132,8 +132,16 @@ impl InternalApiConfig {
             l2_erc20_default_bridge: contracts_config.l2_erc20_bridge_addr,
             l1_shared_default_bridge: contracts_config.l1_shared_bridge_proxy_addr,
             l2_shared_default_bridge: contracts_config.l2_shared_bridge_addr,
-            l1_weth_bridge: contracts_config.l1_weth_bridge_proxy_addr,
-            l2_weth_bridge: contracts_config.l2_weth_bridge_addr,
+            l1_weth_bridge: Some(
+                contracts_config
+                    .l1_weth_bridge_proxy_addr
+                    .unwrap_or_default(),
+            ),
+            l2_weth_bridge: Some(
+                contracts_config
+                    .l1_weth_bridge_proxy_addr
+                    .unwrap_or_default(),
+            ),
         },
         bridgehub_proxy_addr: contracts_config
             .ecosystem_contracts
diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs
index b2331a547707..d136971734aa 100644
--- a/core/node/api_server/src/web3/tests/mod.rs
+++ b/core/node/api_server/src/web3/tests/mod.rs
@@ -7,7 +7,6 @@ use std::{
 
 use assert_matches::assert_matches;
 use async_trait::async_trait;
-use multivm::zk_evm_latest::ethereum_types::U256;
 use tokio::sync::watch;
 use zksync_config::{
     configs::{
@@ -18,6 +17,7 @@ use zksync_config::{
     GenesisConfig,
 };
 use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, CoreDal};
+use zksync_multivm::zk_evm_latest::ethereum_types::U256;
 use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams};
 use zksync_node_test_utils::{
     create_l1_batch, create_l1_batch_metadata, create_l2_block, create_l2_transaction,
@@ -73,13 +73,13 @@ const POLL_INTERVAL: Duration = Duration::from_millis(50);
 async fn setting_response_size_limits() {
     let mut rpc_module = RpcModule::new(());
     rpc_module
-        .register_method("test_limited", |params, _ctx| {
+        .register_method("test_limited", |params, _ctx, _ext| {
             let response_size: usize = params.one()?;
             Ok::<_, ErrorObjectOwned>("!".repeat(response_size))
         })
         .unwrap();
     rpc_module
-        .register_method("test_unlimited", |params, _ctx| {
+        .register_method("test_unlimited", |params, _ctx, _ext| {
             let response_size: usize = params.one()?;
             Ok::<_, ErrorObjectOwned>("!".repeat(response_size))
         })
@@ -954,7 +954,7 @@ impl HttpTest for RpcCallsTracingTest {
         let calls = self.tracer.recorded_calls().take();
         assert_eq!(calls.len(), 1);
-        assert!(calls[0].response.is_success());
+        assert!(calls[0].error_code.is_none());
         assert_eq!(calls[0].metadata.name, "eth_blockNumber");
         assert_eq!(calls[0].metadata.block_id, None);
         assert_eq!(calls[0].metadata.block_diff, None);
@@ -965,7 +965,7 @@ impl HttpTest for RpcCallsTracingTest {
         let calls = self.tracer.recorded_calls().take();
         assert_eq!(calls.len(), 1);
-        assert!(calls[0].response.is_success());
+        assert!(calls[0].error_code.is_none());
         assert_eq!(calls[0].metadata.name, "eth_getBlockByNumber");
         assert_eq!(
             calls[0].metadata.block_id,
@@ -978,7 +978,7 @@ impl HttpTest for RpcCallsTracingTest {
         let calls = self.tracer.recorded_calls().take();
         assert_eq!(calls.len(), 1);
-        assert!(calls[0].response.is_success());
+        assert!(calls[0].error_code.is_none());
         assert_eq!(calls[0].metadata.name, "eth_getBlockByNumber");
         assert_eq!(
             calls[0].metadata.block_id,
@@ -993,10 +993,7 @@ impl HttpTest for RpcCallsTracingTest {
         let calls = self.tracer.recorded_calls().take();
         assert_eq!(calls.len(), 1);
-        assert_eq!(
-            calls[0].response.as_error_code(),
-            Some(ErrorCode::MethodNotFound.code())
-        );
+        assert_eq!(calls[0].error_code, Some(ErrorCode::MethodNotFound.code()));
         assert!(!calls[0].metadata.has_app_error);
 
         ClientT::request::<serde_json::Value, _>(&client, "eth_getBlockByNumber", rpc_params![0])
             .await
             .unwrap_err();
         let calls = self.tracer.recorded_calls().take();
         assert_eq!(calls.len(), 1);
@@ -1005,10 +1002,7 @@ impl HttpTest for RpcCallsTracingTest {
-        assert_eq!(
-            calls[0].response.as_error_code(),
-            Some(ErrorCode::InvalidParams.code())
-        );
+        assert_eq!(calls[0].error_code, Some(ErrorCode::InvalidParams.code()));
         assert!(!calls[0].metadata.has_app_error);
 
         // Check app-level error.
@@ -1022,10 +1016,7 @@ impl HttpTest for RpcCallsTracingTest {
-        assert_eq!(
-            calls[0].response.as_error_code(),
-            Some(ErrorCode::InvalidParams.code())
-        );
+        assert_eq!(calls[0].error_code, Some(ErrorCode::InvalidParams.code()));
         assert!(calls[0].metadata.has_app_error);
 
         // Check batch RPC request.
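The common thread through all of the API changes above is a new, optional state-override argument at the end of `eth_call`, `eth_estimateGas`, `zks_estimateFee`, and `zks_estimateGasL1ToL2`. A minimal client-side sketch of the new parameter (a hypothetical snippet, not part of this diff, assuming a `client: &DynClient<L2>` and a prepared `call_request` as in the tests that follow; `sender` is an arbitrary address):

    use std::collections::HashMap;

    use zksync_types::api::state_override::{OverrideAccount, StateOverride};

    // Fund a throwaway sender only for the duration of this estimation;
    // nothing is persisted on-chain.
    let state_override = StateOverride::new(HashMap::from([(
        sender,
        OverrideAccount {
            balance: Some(U256::max_value()),
            ..OverrideAccount::default()
        },
    )]));
    let gas = client
        .estimate_gas(call_request.clone(), None, Some(state_override))
        .await?;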
diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs
index cb59f2f88e25..50de027174f3 100644
--- a/core/node/api_server/src/web3/tests/vm.rs
+++ b/core/node/api_server/src/web3/tests/vm.rs
@@ -2,8 +2,9 @@
 
 use std::sync::atomic::{AtomicU32, Ordering};
 
+use api::state_override::{OverrideAccount, StateOverride};
 use itertools::Itertools;
-use multivm::{
+use zksync_multivm::{
     interface::{ExecutionResult, VmRevertReason},
     vm_latest::{VmExecutionLogs, VmExecutionResultAndLogs},
 };
@@ -63,7 +64,9 @@ impl HttpTest for CallTest {
         client: &DynClient<L2>,
         _pool: &ConnectionPool<Core>,
     ) -> anyhow::Result<()> {
-        let call_result = client.call(Self::call_request(b"pending"), None).await?;
+        let call_result = client
+            .call(Self::call_request(b"pending"), None, None)
+            .await?;
         assert_eq!(call_result.0, b"output");
 
         let valid_block_numbers_and_calldata = [
@@ -74,7 +77,7 @@ impl HttpTest for CallTest {
         for (number, calldata) in valid_block_numbers_and_calldata {
             let number = api::BlockIdVariant::BlockNumber(number);
             let call_result = client
-                .call(Self::call_request(calldata), Some(number))
+                .call(Self::call_request(calldata), Some(number), None)
                 .await?;
             assert_eq!(call_result.0, b"output");
         }
@@ -82,7 +85,7 @@ impl HttpTest for CallTest {
         let invalid_block_number = api::BlockNumber::from(100);
         let number = api::BlockIdVariant::BlockNumber(invalid_block_number);
         let error = client
-            .call(Self::call_request(b"100"), Some(number))
+            .call(Self::call_request(b"100"), Some(number), None)
             .await
             .unwrap_err();
         if let ClientError::Call(error) = error {
@@ -120,7 +123,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery {
         _pool: &ConnectionPool<Core>,
     ) -> anyhow::Result<()> {
         let call_result = client
-            .call(CallTest::call_request(b"pending"), None)
+            .call(CallTest::call_request(b"pending"), None, None)
             .await?;
         assert_eq!(call_result.0, b"output");
         let pending_block_number = api::BlockIdVariant::BlockNumber(api::BlockNumber::Pending);
@@ -128,6 +131,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery {
             .call(
                 CallTest::call_request(b"pending"),
                 Some(pending_block_number),
+                None,
             )
             .await?;
         assert_eq!(call_result.0, b"output");
@@ -137,7 +141,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery {
         for number in pruned_block_numbers {
             let number = api::BlockIdVariant::BlockNumber(number.into());
             let error = client
-                .call(CallTest::call_request(b"pruned"), Some(number))
+                .call(CallTest::call_request(b"pruned"), Some(number), None)
                 .await
                 .unwrap_err();
             assert_pruned_block_error(&error, first_local_l2_block);
@@ -147,7 +151,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery {
         for number in first_l2_block_numbers {
             let number = api::BlockIdVariant::BlockNumber(number);
             let call_result = client
-                .call(CallTest::call_request(b"first"), Some(number))
+                .call(CallTest::call_request(b"first"), Some(number), None)
                 .await?;
             assert_eq!(call_result.0, b"output");
         }
@@ -499,7 +503,7 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery {
         for number in pruned_block_numbers {
             let number = api::BlockIdVariant::BlockNumber(number.into());
             let error = client
-                .call(CallTest::call_request(b"pruned"), Some(number))
+                .call(CallTest::call_request(b"pruned"), Some(number), None)
                 .await
                 .unwrap_err();
             assert_pruned_block_error(&error, first_local_l2_block);
@@ -579,7 +583,7 @@ impl HttpTest for EstimateGasTest {
         for threshold in [10_000, 50_000, 100_000, 1_000_000] {
             self.gas_limit_threshold.store(threshold, Ordering::Relaxed);
             let output = client
-                .estimate_gas(l2_transaction.clone().into(), None)
+                .estimate_gas(l2_transaction.clone().into(), None, None)
                 .await?;
             assert!(
                 output >= U256::from(threshold),
@@ -604,10 +608,15 @@ impl HttpTest for EstimateGasTest {
         let mut call_request = CallRequest::from(l2_transaction);
         call_request.from = Some(SendRawTransactionTest::private_key().address());
         call_request.value = Some(1_000_000.into());
-        client.estimate_gas(call_request.clone(), None).await?;
+        client
+            .estimate_gas(call_request.clone(), None, None)
+            .await?;
 
         call_request.value = Some(U256::max_value());
-        let error = client.estimate_gas(call_request, None).await.unwrap_err();
+        let error = client
+            .estimate_gas(call_request, None, None)
+            .await
+            .unwrap_err();
         if let ClientError::Call(error) = error {
             let error_msg = error.message();
             assert!(
@@ -630,3 +639,106 @@ async fn estimate_gas_basics() {
     test_http_server(EstimateGasTest::new(true)).await;
 }
 
 async fn estimate_gas_after_snapshot_recovery() {
     test_http_server(EstimateGasTest::new(true)).await;
 }
+
+#[derive(Debug)]
+struct EstimateGasWithStateOverrideTest {
+    gas_limit_threshold: Arc<AtomicU32>,
+    snapshot_recovery: bool,
+}
+
+impl EstimateGasWithStateOverrideTest {
+    fn new(snapshot_recovery: bool) -> Self {
+        Self {
+            gas_limit_threshold: Arc::default(),
+            snapshot_recovery,
+        }
+    }
+}
+
+#[async_trait]
+impl HttpTest for EstimateGasWithStateOverrideTest {
+    fn storage_initialization(&self) -> StorageInitialization {
+        let snapshot_recovery = self.snapshot_recovery;
+        SendRawTransactionTest { snapshot_recovery }.storage_initialization()
+    }
+
+    fn transaction_executor(&self) -> MockTransactionExecutor {
+        let mut tx_executor = MockTransactionExecutor::default();
+        let pending_block_number = if self.snapshot_recovery {
+            StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2
+        } else {
+            L2BlockNumber(1)
+        };
+        let gas_limit_threshold = self.gas_limit_threshold.clone();
+        tx_executor.set_call_responses(move |tx, block_args| {
+            assert_eq!(tx.execute.calldata(), [] as [u8; 0]);
+            assert_eq!(tx.nonce(), Some(Nonce(0)));
+            assert_eq!(block_args.resolved_block_number(), pending_block_number);
+
+            let gas_limit_threshold = gas_limit_threshold.load(Ordering::SeqCst);
+            if tx.gas_limit() >= U256::from(gas_limit_threshold) {
+                ExecutionResult::Success { output: vec![] }
+            } else {
+                ExecutionResult::Revert {
+                    output: VmRevertReason::VmError,
+                }
+            }
+        });
+        tx_executor
+    }
+
+    async fn test(
+        &self,
+        client: &DynClient<L2>,
+        _pool: &ConnectionPool<Core>,
+    ) -> anyhow::Result<()> {
+        // Transaction with balance override
+        let l2_transaction = create_l2_transaction(10, 100);
+        let mut call_request = CallRequest::from(l2_transaction);
+        let request_initiator = Address::random();
+        call_request.from = Some(request_initiator);
+        call_request.value = Some(1_000_000.into());
+
+        let state_override = HashMap::from([(
+            request_initiator,
+            OverrideAccount {
+                balance: Some(U256::max_value()),
+                ..OverrideAccount::default()
+            },
+        )]);
+        let state_override = StateOverride::new(state_override);
+
+        client
+            .estimate_gas(call_request.clone(), None, Some(state_override))
+            .await?;
+
+        // Transaction that should fail without balance override
+        let l2_transaction = create_l2_transaction(10, 100);
+        let mut call_request = CallRequest::from(l2_transaction);
+        call_request.from = Some(Address::random());
+        call_request.value = Some(1_000_000.into());
+
+        let error = client
+            .estimate_gas(call_request.clone(), None, None)
+            .await
+            .unwrap_err();
+
+        if let ClientError::Call(error) = error {
+            let error_msg = error.message();
+            assert!(
+                error_msg
+                    .to_lowercase()
+                    .contains("insufficient balance for transfer"),
+                "{error_msg}"
+            );
+        } else {
+            panic!("Unexpected error: {error:?}");
+        }
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn estimate_gas_with_state_override() {
+    test_http_server(EstimateGasWithStateOverrideTest::new(false)).await;
+}
diff --git a/core/node/base_token_adjuster/Cargo.toml b/core/node/base_token_adjuster/Cargo.toml
new file mode 100644
index 000000000000..812cacaa1f73
--- /dev/null
+++ b/core/node/base_token_adjuster/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "zksync_base_token_adjuster"
+description = "ZKsync base token adjuster"
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+license.workspace = true
+keywords.workspace = true
+categories.workspace = true
+
+
+[dependencies]
+zksync_dal.workspace = true
+zksync_config.workspace = true
+zksync_types.workspace = true
+zksync_external_price_api.workspace = true
+
+tokio = { workspace = true, features = ["time"] }
+anyhow.workspace = true
+tracing.workspace = true
+chrono.workspace = true
+rand.workspace = true
+async-trait.workspace = true
diff --git a/core/node/base_token_adjuster/README.md b/core/node/base_token_adjuster/README.md
new file mode 100644
index 000000000000..c5b6dec2b17c
--- /dev/null
+++ b/core/node/base_token_adjuster/README.md
@@ -0,0 +1,20 @@
+# Base Token Adjuster
+
+This crate contains all the logic to handle ZK Chains with custom base tokens.
+
+## Overview
+
+### The Base Token Ratio Persister
+
+Contains the building blocks for the `BaseTokenRatioPersisterLayer`.
+
+- Connects with external APIs to get the current price of the base token and of ETH.
+- Persists the ETH<->BaseToken ratio in the database.
+- Upon a certain configured threshold, updates the L1 ETH<->BaseToken conversion ratio.
+
+### The Base Token Ratio Provider
+
+Contains the building blocks for the `BaseTokenRatioProviderLayer`.
+
+- Periodically fetches the latest ETH<->BaseToken conversion ratio from the DB and caches it.
+- Exposes this ratio upon request.
diff --git a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs
new file mode 100644
index 000000000000..8c94b19e0179
--- /dev/null
+++ b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs
@@ -0,0 +1,115 @@
+use std::{fmt::Debug, sync::Arc, time::Duration};
+
+use anyhow::Context as _;
+use tokio::{sync::watch, time::sleep};
+use zksync_config::configs::base_token_adjuster::BaseTokenAdjusterConfig;
+use zksync_dal::{ConnectionPool, Core, CoreDal};
+use zksync_external_price_api::PriceAPIClient;
+use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address};
+
+#[derive(Debug, Clone)]
+pub struct BaseTokenRatioPersister {
+    pool: ConnectionPool<Core>,
+    config: BaseTokenAdjusterConfig,
+    base_token_address: Address,
+    price_api_client: Arc<dyn PriceAPIClient>,
+}
+
+impl BaseTokenRatioPersister {
+    pub fn new(
+        pool: ConnectionPool<Core>,
+        config: BaseTokenAdjusterConfig,
+        base_token_address: Address,
+        price_api_client: Arc<dyn PriceAPIClient>,
+    ) -> Self {
+        Self {
+            pool,
+            config,
+            base_token_address,
+            price_api_client,
+        }
+    }
+
+    /// Main loop for the base token ratio persister.
+    /// Orchestrates fetching a new ratio, persisting it, and conditionally updating the L1 with it.
+    pub async fn run(&mut self, mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
+        let mut timer = tokio::time::interval(self.config.price_polling_interval());
+
+        while !*stop_receiver.borrow_and_update() {
+            tokio::select! {
+                _ = timer.tick() => { /* continue iterations */ }
+                _ = stop_receiver.changed() => break,
+            }
+
+            if let Err(err) = self.loop_iteration().await {
+                return Err(err)
+                    .context("Failed to execute a base_token_ratio_persister loop iteration");
+            }
+        }
+
+        tracing::info!("Stop signal received, base_token_ratio_persister is shutting down");
+        Ok(())
+    }
+
+    async fn loop_iteration(&self) -> anyhow::Result<()> {
+        // TODO(PE-148): Consider shifting retry upon adding external API redundancy.
+        let new_ratio = self.retry_fetch_ratio().await?;
+
+        self.persist_ratio(new_ratio).await?;
+        // TODO(PE-128): Update L1 ratio
+
+        Ok(())
+    }
+
+    async fn retry_fetch_ratio(&self) -> anyhow::Result<BaseTokenAPIRatio> {
+        let sleep_duration = Duration::from_secs(1);
+        let max_retries = 5;
+        let mut attempts = 0;
+
+        loop {
+            match self
+                .price_api_client
+                .fetch_ratio(self.base_token_address)
+                .await
+            {
+                Ok(ratio) => {
+                    return Ok(ratio);
+                }
+                Err(err) if attempts < max_retries => {
+                    attempts += 1;
+                    tracing::warn!(
+                        "Attempt {}/{} to fetch ratio from coingecko failed with err: {}. Retrying...",
+                        attempts,
+                        max_retries,
+                        err
+                    );
+                    sleep(sleep_duration).await;
+                }
+                Err(err) => {
+                    return Err(err)
+                        .context("Failed to fetch base token ratio after multiple attempts");
+                }
+            }
+        }
+    }
+
+    async fn persist_ratio(&self, api_ratio: BaseTokenAPIRatio) -> anyhow::Result<usize> {
+        let mut conn = self
+            .pool
+            .connection_tagged("base_token_ratio_persister")
+            .await
+            .context("Failed to obtain connection to the database")?;
+
+        let id = conn
+            .base_token_dal()
+            .insert_token_ratio(
+                api_ratio.numerator,
+                api_ratio.denominator,
+                &api_ratio.ratio_timestamp.naive_utc(),
+            )
+            .await
+            .context("Failed to insert base token ratio into the database")?;
+
+        Ok(id)
+    }
+}
diff --git a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs
new file mode 100644
index 000000000000..a89c2d909a15
--- /dev/null
+++ b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs
@@ -0,0 +1,134 @@
+use std::{
+    fmt::Debug,
+    num::NonZeroU64,
+    sync::{Arc, RwLock},
+};
+
+use anyhow::Context;
+use async_trait::async_trait;
+use tokio::sync::watch;
+use zksync_config::BaseTokenAdjusterConfig;
+use zksync_dal::{ConnectionPool, Core, CoreDal};
+use zksync_types::fee_model::BaseTokenConversionRatio;
+
+#[async_trait]
+pub trait BaseTokenRatioProvider: Debug + Send + Sync + 'static {
+    fn get_conversion_ratio(&self) -> BaseTokenConversionRatio;
+}
+
+#[derive(Debug, Clone)]
+pub struct DBBaseTokenRatioProvider {
+    pub pool: ConnectionPool<Core>,
+    pub latest_ratio: Arc<RwLock<BaseTokenConversionRatio>>,
+    config: BaseTokenAdjusterConfig,
+}
+
+impl DBBaseTokenRatioProvider {
+    pub async fn new(
+        pool: ConnectionPool<Core>,
+        config: BaseTokenAdjusterConfig,
+    ) -> anyhow::Result<Self> {
+        let fetcher = Self {
+            pool,
+            latest_ratio: Arc::default(),
+            config,
+        };
+        fetcher.update_latest_price().await?;
+
+        // TODO(PE-129): Implement latest ratio usability logic.
+
+        tracing::debug!(
+            "Starting the base token ratio provider with conversion ratio: {:?}",
+            fetcher.latest_ratio
+        );
+        Ok(fetcher)
+    }
+
+    fn get_latest_ratio(&self) -> BaseTokenConversionRatio {
+        *self.latest_ratio.read().unwrap()
+    }
+
+    pub async fn run(&self, mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
+        let mut timer = tokio::time::interval(self.config.price_cache_update_interval());
+
+        while !*stop_receiver.borrow_and_update() {
+            tokio::select! {
+                _ = timer.tick() => { /* continue iterations */ }
+                _ = stop_receiver.changed() => break,
+            }
+
+            // TODO(PE-129): Implement latest ratio usability logic.
+            self.update_latest_price().await?;
+        }
+
+        tracing::info!("Stop signal received, base_token_ratio_provider is shutting down");
+        Ok(())
+    }
+
+    async fn update_latest_price(&self) -> anyhow::Result<()> {
+        let latest_storage_ratio = self
+            .pool
+            .connection_tagged("db_base_token_ratio_provider")
+            .await
+            .context("Failed to obtain connection to the database")?
+            .base_token_dal()
+            .get_latest_ratio()
+            .await;
+
+        let ratio = match latest_storage_ratio {
+            Ok(Some(latest_storage_price)) => BaseTokenConversionRatio {
+                numerator: latest_storage_price.numerator,
+                denominator: latest_storage_price.denominator,
+            },
+            Ok(None) => {
+                // TODO(PE-136): Insert initial ratio from genesis.
+                // Though the DB should be populated very soon after the server starts, it is possible
+                // to have no ratios in the DB right after genesis. Having initial ratios in the DB
+                // from the genesis stage will eliminate this possibility.
+                tracing::error!("No latest price found in the database. Using default ratio.");
+                BaseTokenConversionRatio::default()
+            }
+            Err(err) => anyhow::bail!("Failed to get latest base token ratio: {:?}", err),
+        };
+
+        *self.latest_ratio.write().unwrap() = ratio;
+        Ok(())
+    }
+}
+
+#[async_trait]
+impl BaseTokenRatioProvider for DBBaseTokenRatioProvider {
+    fn get_conversion_ratio(&self) -> BaseTokenConversionRatio {
+        self.get_latest_ratio()
+    }
+}
+
+// Struct for a no-op BaseTokenRatioProvider (conversion ratio is either always 1:1 or a forced ratio).
+#[derive(Debug, Clone)]
+pub struct NoOpRatioProvider {
+    pub latest_ratio: BaseTokenConversionRatio,
+}
+
+impl NoOpRatioProvider {
+    pub fn new(latest_ratio: BaseTokenConversionRatio) -> Self {
+        Self { latest_ratio }
+    }
+}
+
+impl Default for NoOpRatioProvider {
+    fn default() -> Self {
+        Self {
+            latest_ratio: BaseTokenConversionRatio {
+                numerator: NonZeroU64::new(1).unwrap(),
+                denominator: NonZeroU64::new(1).unwrap(),
+            },
+        }
+    }
+}
+
+#[async_trait]
+impl BaseTokenRatioProvider for NoOpRatioProvider {
+    fn get_conversion_ratio(&self) -> BaseTokenConversionRatio {
+        self.latest_ratio
+    }
+}
diff --git a/core/node/base_token_adjuster/src/lib.rs b/core/node/base_token_adjuster/src/lib.rs
new file mode 100644
index 000000000000..2340ca56c2a7
--- /dev/null
+++ b/core/node/base_token_adjuster/src/lib.rs
@@ -0,0 +1,9 @@
+pub use self::{
+    base_token_ratio_persister::BaseTokenRatioPersister,
+    base_token_ratio_provider::{
+        BaseTokenRatioProvider, DBBaseTokenRatioProvider, NoOpRatioProvider,
+    },
+};
+
+mod base_token_ratio_persister;
+mod base_token_ratio_provider;
diff --git a/core/node/block_reverter/Cargo.toml b/core/node/block_reverter/Cargo.toml
index 68fdf72acd83..b61d14abccbc 100644
--- a/core/node/block_reverter/Cargo.toml
+++ b/core/node/block_reverter/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_block_reverter"
-version = "0.1.0"
+description = "ZKsync block reverter library"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
diff --git a/core/node/block_reverter/src/lib.rs b/core/node/block_reverter/src/lib.rs
index b0ee48563b7e..da1bf091ea3b 100644
--- a/core/node/block_reverter/src/lib.rs
+++ b/core/node/block_reverter/src/lib.rs
@@ -3,7 +3,7 @@ use std::{path::Path, sync::Arc, time::Duration};
 use anyhow::Context as _;
 use serde::Serialize;
 use tokio::{fs, sync::Semaphore};
-use zksync_config::{configs::chain::NetworkConfig, ContractsConfig, EthConfig};
+use zksync_config::{ContractsConfig, EthConfig};
 use zksync_contracts::hyperchain_contract;
 use zksync_dal::{ConnectionPool, Core, CoreDal};
 // Public re-export to simplify the API use.
@@ -36,15 +36,13 @@ pub struct BlockReverterEthConfig {
     validator_timelock_addr: H160,
     default_priority_fee_per_gas: u64,
     hyperchain_id: L2ChainId,
-    era_chain_id: L2ChainId,
 }
 
 impl BlockReverterEthConfig {
     pub fn new(
         eth_config: &EthConfig,
         contract: &ContractsConfig,
-        network_config: &NetworkConfig,
-        era_chain_id: L2ChainId,
+        hyperchain_id: L2ChainId,
     ) -> anyhow::Result<Self> {
         Ok(Self {
             diamond_proxy_addr: contract.diamond_proxy_addr,
@@ -54,8 +52,7 @@ impl BlockReverterEthConfig {
                 .as_ref()
                 .context("gas adjuster")?
                 .default_priority_fee_per_gas,
-            hyperchain_id: network_config.zksync_network_id,
-            era_chain_id,
+            hyperchain_id,
         })
     }
 }
@@ -484,27 +481,15 @@ impl BlockReverter {
 
         let contract = hyperchain_contract();
-        // It is expected that for all new chains `revertBatchesSharedBridge` can be used.
-        // For Era, we are using `revertBatches` function for backwards compatibility in case the migration
-        // to the shared bridge is not yet complete.
-        let data = if eth_config.hyperchain_id == eth_config.era_chain_id {
-            let revert_function = contract
-                .function("revertBatches")
-                .context("`revertBatches` function must be present in contract")?;
-            revert_function
-                .encode_input(&[Token::Uint(last_l1_batch_to_keep.0.into())])
-                .context("failed encoding `revertBatches` input")?
-        } else {
-            let revert_function = contract
-                .function("revertBatchesSharedBridge")
-                .context("`revertBatchesSharedBridge` function must be present in contract")?;
-            revert_function
-                .encode_input(&[
-                    Token::Uint(eth_config.hyperchain_id.as_u64().into()),
-                    Token::Uint(last_l1_batch_to_keep.0.into()),
-                ])
-                .context("failed encoding `revertBatchesSharedBridge` input")?
-        };
+        let revert_function = contract
+            .function("revertBatchesSharedBridge")
+            .context("`revertBatchesSharedBridge` function must be present in contract")?;
+        let data = revert_function
+            .encode_input(&[
+                Token::Uint(eth_config.hyperchain_id.as_u64().into()),
+                Token::Uint(last_l1_batch_to_keep.0.into()),
+            ])
+            .context("failed encoding `revertBatchesSharedBridge` input")?;
 
         let options = Options {
             nonce: Some(nonce.into()),
diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs
index 7b989574b094..161ac3ed00c4 100644
--- a/core/node/block_reverter/src/tests.rs
+++ b/core/node/block_reverter/src/tests.rs
@@ -32,7 +32,11 @@ fn initialize_merkle_tree(path: &Path, storage_logs: &[StorageLog]) -> Vec<H256> {
     let mut tree = ZkSyncTree::new(db.into()).unwrap();
     let hashes = storage_logs.iter().enumerate().map(|(i, log)| {
         let output = tree
-            .process_l1_batch(&[TreeInstruction::write(log.key, i as u64 + 1, log.value)])
+            .process_l1_batch(&[TreeInstruction::write(
+                log.key.hashed_key_u256(),
+                i as u64 + 1,
+                log.value,
+            )])
             .unwrap();
         tree.save().unwrap();
         output.root_hash
@@ -101,7 +105,7 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[StorageLog]) {
         .unwrap();
     storage
         .storage_logs_dedup_dal()
-        .insert_initial_writes(l1_batch_header.number, &[storage_log.key])
+        .insert_initial_writes(l1_batch_header.number, &[storage_log.key.hashed_key()])
         .await
         .unwrap();
 }
@@ -237,7 +241,7 @@ async fn create_mock_snapshot(
     let key = object_store
         .put(
             key,
-            &SnapshotStorageLogsChunk {
+            &SnapshotStorageLogsChunk::<H256> {
                 storage_logs: vec![],
             },
         )
diff --git a/core/node/commitment_generator/Cargo.toml b/core/node/commitment_generator/Cargo.toml
index 24752691348b..a88b494a7d86 100644
--- a/core/node/commitment_generator/Cargo.toml
+++ b/core/node/commitment_generator/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_commitment_generator"
-version = "0.1.0"
+description = "ZKsync commitment generator"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
@@ -18,7 +19,7 @@
 zksync_l1_contract_interface.workspace = true
 zksync_utils.workspace = true
 zksync_eth_client.workspace = true
 zksync_contracts.workspace = true
-multivm.workspace = true
+zksync_multivm.workspace = true
 circuit_sequencer_api_1_4_0.workspace = true
 circuit_sequencer_api_1_4_1.workspace = true
 circuit_sequencer_api_1_5_0.workspace = true
diff --git a/core/node/commitment_generator/src/lib.rs b/core/node/commitment_generator/src/lib.rs
index cbb6279481ca..6dc1ef2d29fa 100644
--- a/core/node/commitment_generator/src/lib.rs
+++ b/core/node/commitment_generator/src/lib.rs
@@ -2,11 +2,11 @@ use std::{num::NonZeroU32, ops, sync::Arc, time::Duration};
 
 use anyhow::Context;
 use itertools::Itertools;
-use multivm::zk_evm_latest::ethereum_types::U256;
 use tokio::{sync::watch, task::JoinHandle};
 use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck};
 use zksync_l1_contract_interface::i_executor::commit::kzg::pubdata_to_blob_commitments;
+use zksync_multivm::zk_evm_latest::ethereum_types::U256;
 use zksync_types::{
     blob::num_blobs_required,
     commitment::{
@@ -180,7 +180,7 @@ impl CommitmentGenerator {
         };
         let touched_slots = connection
             .storage_logs_dal()
-            .get_touched_slots_for_l1_batch(l1_batch_number)
+            .get_touched_slots_for_executed_l1_batch(l1_batch_number)
             .await?;
         let touched_hashed_keys: Vec<_> =
             touched_slots.keys().map(|key| key.hashed_key()).collect();
diff --git a/core/node/commitment_generator/src/tests.rs b/core/node/commitment_generator/src/tests.rs
index 29f17fa1646f..d857013a7699 100644
--- a/core/node/commitment_generator/src/tests.rs
+++ b/core/node/commitment_generator/src/tests.rs
@@ -31,7 +31,7 @@ async fn seal_l1_batch(storage: &mut Connection<'_, Core>, number: L1BatchNumber) {
         .unwrap();
     storage
         .storage_logs_dedup_dal()
-        .insert_initial_writes(number, &[storage_key])
+        .insert_initial_writes(number, &[storage_key.hashed_key()])
         .await
         .unwrap();
 
diff --git a/core/node/commitment_generator/src/utils.rs b/core/node/commitment_generator/src/utils.rs
index 9a12f0c43165..b4e6bc542e97 100644
--- a/core/node/commitment_generator/src/utils.rs
+++ b/core/node/commitment_generator/src/utils.rs
@@ -2,7 +2,6 @@
 
 use std::fmt;
 
-use multivm::utils::get_used_bootloader_memory_bytes;
 use zk_evm_1_3_3::{
     aux_structures::Timestamp as Timestamp_1_3_3,
     zk_evm_abstractions::queries::LogQuery as LogQuery_1_3_3,
@@ -15,6 +14,7 @@ use zk_evm_1_5_0::{
     aux_structures::Timestamp as Timestamp_1_5_0,
     zk_evm_abstractions::queries::LogQuery as LogQuery_1_5_0,
 };
+use zksync_multivm::utils::get_used_bootloader_memory_bytes;
 use zksync_types::{zk_evm_types::LogQuery, ProtocolVersionId, VmVersion, H256, U256};
 use zksync_utils::expand_memory_contents;
 
diff --git a/core/node/consensus/Cargo.toml b/core/node/consensus/Cargo.toml
index 5fc95b6c91f3..68fffa56dcbc 100644
--- a/core/node/consensus/Cargo.toml
+++ b/core/node/consensus/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_node_consensus"
-version = "0.1.0"
+description = "Consensus integration for ZKsync node"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
@@ -36,6 +37,7 @@
 anyhow.workspace = true
 async-trait.workspace = true
 secrecy.workspace = true
 tempfile.workspace = true
+thiserror.workspace = true
 tracing.workspace = true
 
 [dev-dependencies]
diff --git a/core/node/consensus/src/batch.rs b/core/node/consensus/src/batch.rs
index d393a845ec6d..08246c4e5c04 100644
--- a/core/node/consensus/src/batch.rs
+++ b/core/node/consensus/src/batch.rs
@@ -14,7 +14,7 @@ use zksync_types::{
 };
 use zksync_utils::{h256_to_u256, u256_to_h256};
 
-use crate::ConnectionPool;
+use crate::storage::ConnectionPool;
 
 /// Commitment to the last block of a batch.
 pub(crate) struct LastBlockCommit {
diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs
index b0dfd3fbfef6..f2ca16956a2d 100644
--- a/core/node/consensus/src/config.rs
+++ b/core/node/consensus/src/config.rs
@@ -10,7 +10,7 @@ use zksync_config::{
 };
 use zksync_consensus_crypto::{Text, TextFmt};
 use zksync_consensus_executor as executor;
-use zksync_consensus_roles::{node, validator};
+use zksync_consensus_roles::{attester, node, validator};

 fn read_secret_text<T: TextFmt>(text: Option<&Secret<String>>) -> anyhow::Result<Option<T>> {
     text.map(|text| Text::new(text.expose_secret()).decode())
@@ -24,6 +24,12 @@ pub(super) fn validator_key(
     secrets: &ConsensusSecrets,
 ) -> anyhow::Result<Option<validator::SecretKey>> {
     read_secret_text(secrets.validator_key.as_ref().map(|x| &x.0))
 }

+pub(super) fn attester_key(
+    secrets: &ConsensusSecrets,
+) -> anyhow::Result<Option<attester::SecretKey>> {
+    read_secret_text(secrets.attester_key.as_ref().map(|x| &x.0))
+}
+
 /// Consensus genesis specification.
 /// It is a digest of the `validator::Genesis`,
 /// which allows to initialize genesis (if not present)
@@ -33,6 +39,7 @@ pub(super) struct GenesisSpec {
     pub(super) chain_id: validator::ChainId,
     pub(super) protocol_version: validator::ProtocolVersion,
     pub(super) validators: validator::Committee,
+    pub(super) attesters: Option<attester::Committee>,
     pub(super) leader_selection: validator::LeaderSelectionMode,
 }

@@ -41,7 +48,8 @@ impl GenesisSpec {
         Self {
             chain_id: g.chain_id,
             protocol_version: g.protocol_version,
-            validators: g.committee.clone(),
+            validators: g.validators.clone(),
+            attesters: g.attesters.clone(),
             leader_selection: g.leader_selection.clone(),
         }
     }
@@ -59,6 +67,20 @@
             })
             .collect::<anyhow::Result<_>>()
             .context("validators")?;
+
+        let attesters: Vec<_> = x
+            .attesters
+            .iter()
+            .enumerate()
+            .map(|(i, v)| {
+                Ok(attester::WeightedAttester {
+                    key: Text::new(&v.key.0).decode().context("key").context(i)?,
+                    weight: v.weight,
+                })
+            })
+            .collect::<anyhow::Result<_>>()
+            .context("attesters")?;
+
         Ok(Self {
             chain_id: validator::ChainId(x.chain_id.as_u64()),
             protocol_version: validator::ProtocolVersion(x.protocol_version.0),
@@ -66,6 +88,11 @@
                 Text::new(&x.leader.0).decode().context("leader")?,
             ),
             validators: validator::Committee::new(validators).context("validators")?,
+            attesters: if attesters.is_empty() {
+                None
+            } else {
+                Some(attester::Committee::new(attesters).context("attesters")?)
+            },
         })
     }
 }
@@ -91,10 +118,15 @@ pub(super) fn executor(
             append(k, v).with_context(|| format!("gossip_static_outbound[{i}]"))?;
         }
     }
+
+    let mut rpc = executor::RpcConfig::default();
+    rpc.get_block_rate = cfg.rpc().get_block_rate();
+
     Ok(executor::Config {
         server_addr: cfg.server_addr,
         public_addr: net::Host(cfg.public_addr.0.clone()),
         max_payload_size: cfg.max_payload_size,
+        max_batch_size: cfg.max_batch_size,
         node_key: node_key(secrets)
             .context("node_key")?
.context("missing node_key")?, @@ -107,5 +139,8 @@ pub(super) fn executor( .collect::>() .context("gossip_static_inbound")?, gossip_static_outbound, + rpc, + // TODO: Add to configuration + debug_page: None, }) } diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 685bc982bd07..66bdc822c058 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -2,15 +2,15 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor as executor; use zksync_consensus_roles::validator; -use zksync_consensus_storage::BlockStore; +use zksync_consensus_storage::{BatchStore, BlockStore}; use zksync_node_sync::{ fetcher::FetchedBlock, sync_action::ActionQueueSender, MainNodeClient, SyncState, }; use zksync_types::L2BlockNumber; use zksync_web3_decl::client::{DynClient, L2}; -use super::{config, storage::Store, ConnectionPool, ConsensusConfig, ConsensusSecrets}; -use crate::storage; +use super::{config, storage::Store, ConsensusConfig, ConsensusSecrets}; +use crate::storage::{self, ConnectionPool}; /// External node. pub(super) struct EN { @@ -32,6 +32,15 @@ impl EN { cfg: ConsensusConfig, secrets: ConsensusSecrets, ) -> anyhow::Result<()> { + let attester = config::attester_key(&secrets) + .context("attester_key")? + .map(|key| executor::Attester { key }); + + tracing::debug!( + is_attester = attester.is_some(), + "external node attester mode" + ); + let res: ctx::Result<()> = scope::run!(ctx, |ctx, s| async { // Update sync state in the background. s.spawn_bg(self.fetch_state_loop(ctx)); @@ -39,18 +48,23 @@ impl EN { // Initialize genesis. let genesis = self.fetch_genesis(ctx).await.wrap("fetch_genesis()")?; let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; + conn.try_update_genesis(ctx, &genesis) .await .wrap("set_genesis()")?; + let mut payload_queue = conn .new_payload_queue(ctx, actions, self.sync_state.clone()) .await .wrap("new_payload_queue()")?; + drop(conn); // Fetch blocks before the genesis. self.fetch_blocks(ctx, &mut payload_queue, Some(genesis.first_block)) - .await?; + .await + .wrap("fetch_blocks()")?; + // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. s.spawn_bg::<()>(async { @@ -69,17 +83,26 @@ impl EN { }); // Run consensus component. + // External nodes have a payload queue which they use to fetch data from the main node. let (store, runner) = Store::new(ctx, self.pool.clone(), Some(payload_queue)) .await .wrap("Store::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + + let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) + .await + .wrap("BatchStore::new()")?; + s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + let executor = executor::Executor { config: config::executor(&cfg, &secrets)?, block_store, + batch_store, validator: config::validator_key(&secrets) .context("validator_key")? 
.map(|key| executor::Validator { @@ -87,8 +110,10 @@ impl EN { replica_store: Box::new(store.clone()), payload_manager: Box::new(store.clone()), }), + attester, }; executor.run(ctx).await?; + Ok(()) }) .await; @@ -104,6 +129,10 @@ impl EN { ctx: &ctx::Ctx, actions: ActionQueueSender, ) -> anyhow::Result<()> { + tracing::warn!("\ + WARNING: this node is using ZKsync API synchronization, which will be deprecated soon. \ + Please follow this instruction to switch to p2p synchronization: \ + https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/09_decentralization.md"); let res: ctx::Result<()> = scope::run!(ctx, |ctx, s| async { // Update sync state in the background. s.spawn_bg(self.fetch_state_loop(ctx)); diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 0e73c29f7741..574e496f4d11 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -10,7 +10,7 @@ use zksync_dal::Core; use zksync_node_sync::{sync_action::ActionQueueSender, SyncState}; use zksync_web3_decl::client::{DynClient, L2}; -use super::{en, storage::ConnectionPool}; +use super::{en, mn, storage::ConnectionPool}; /// Runs the consensus task in the main node mode. pub async fn run_main_node( @@ -19,10 +19,15 @@ pub async fn run_main_node( secrets: ConsensusSecrets, pool: zksync_dal::ConnectionPool, ) -> anyhow::Result<()> { + tracing::info!( + is_attester = secrets.attester_key.is_some(), + is_validator = secrets.validator_key.is_some(), + "running main node" + ); // Consensus is a new component. // For now in case of error we just log it and allow the server // to continue running. - if let Err(err) = super::run_main_node(ctx, cfg, secrets, ConnectionPool(pool)).await { + if let Err(err) = mn::run_main_node(ctx, cfg, secrets, ConnectionPool(pool)).await { tracing::error!("Consensus actor failed: {err:#}"); } else { tracing::info!("Consensus actor stopped"); @@ -33,7 +38,7 @@ pub async fn run_main_node( /// Runs the consensus node for the external node. /// If `cfg` is `None`, it will just fetch blocks from the main node /// using JSON RPC, without starting the consensus node. 
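The logging added in this file records `is_attester`/`is_validator` as structured tracing fields rather than interpolating them into the message string. A minimal self-contained sketch of the same pattern (the subscriber setup and field names here are illustrative, not part of this PR):

    fn main() {
        // Requires the `tracing` and `tracing-subscriber` crates.
        tracing_subscriber::fmt::init();
        let attester_key: Option<u8> = None;
        let validator_key: Option<u8> = Some(1);
        // Fields are recorded as structured key/value pairs, so log processors
        // can filter on them without parsing the message text.
        tracing::info!(
            is_attester = attester_key.is_some(),
            is_validator = validator_key.is_some(),
            "running main node"
        );
    }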
-pub async fn run_en( +pub async fn run_external_node( ctx: &ctx::Ctx, cfg: Option<(ConsensusConfig, ConsensusSecrets)>, pool: zksync_dal::ConnectionPool, @@ -47,8 +52,18 @@ pub async fn run_en( client: main_node_client.for_component("block_fetcher"), }; let res = match cfg { - Some((cfg, secrets)) => en.run(ctx, actions, cfg, secrets).await, - None => en.run_fetcher(ctx, actions).await, + Some((cfg, secrets)) => { + tracing::info!( + is_attester = secrets.attester_key.is_some(), + is_validator = secrets.validator_key.is_some(), + "running external node" + ); + en.run(ctx, actions, cfg, secrets).await + } + None => { + tracing::info!("running fetcher"); + en.run_fetcher(ctx, actions).await + } }; tracing::info!("Consensus actor stopped"); res diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index bc9776c42df5..13d918b5b6ee 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -2,14 +2,8 @@ #![allow(clippy::redundant_locals)] #![allow(clippy::needless_pass_by_ref_mut)] -use anyhow::Context as _; -use zksync_concurrency::{ctx, error::Wrap as _, scope}; -use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; -use zksync_consensus_executor as executor; -use zksync_consensus_roles::validator; -use zksync_consensus_storage::BlockStore; -use crate::storage::{ConnectionPool, Store}; +use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; // Currently `batch` module is only used in tests, // but will be used in production once batch syncing is implemented in consensus. @@ -18,56 +12,9 @@ mod batch; mod config; mod en; pub mod era; +mod mn; mod storage; #[cfg(test)] pub(crate) mod testonly; #[cfg(test)] mod tests; - -/// Task running a consensus validator for the main node. -/// Main node is currently the only leader of the consensus - i.e. it proposes all the -/// L2 blocks (generated by `Statekeeper`). -async fn run_main_node( - ctx: &ctx::Ctx, - cfg: ConsensusConfig, - secrets: ConsensusSecrets, - pool: ConnectionPool, -) -> anyhow::Result<()> { - let validator_key = config::validator_key(&secrets) - .context("validator_key")? - .context("missing validator_key")?; - scope::run!(&ctx, |ctx, s| async { - if let Some(spec) = &cfg.genesis_spec { - let spec = config::GenesisSpec::parse(spec).context("GenesisSpec::parse()")?; - pool.connection(ctx) - .await - .wrap("connection()")? 
- .adjust_genesis(ctx, &spec) - .await - .wrap("adjust_genesis()")?; - } - let (store, runner) = Store::new(ctx, pool, None).await.wrap("Store::new()")?; - s.spawn_bg(runner.run(ctx)); - let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) - .await - .wrap("BlockStore::new()")?; - s.spawn_bg(runner.run(ctx)); - anyhow::ensure!( - block_store.genesis().leader_selection - == validator::LeaderSelectionMode::Sticky(validator_key.public()), - "unsupported leader selection mode - main node has to be the leader" - ); - - let executor = executor::Executor { - config: config::executor(&cfg, &secrets)?, - block_store, - validator: Some(executor::Validator { - key: validator_key, - replica_store: Box::new(store.clone()), - payload_manager: Box::new(store.clone()), - }), - }; - executor.run(ctx).await - }) - .await -} diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs new file mode 100644 index 000000000000..29cacf7a548f --- /dev/null +++ b/core/node/consensus/src/mn.rs @@ -0,0 +1,78 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; +use zksync_consensus_executor::{self as executor, Attester}; +use zksync_consensus_roles::validator; +use zksync_consensus_storage::{BatchStore, BlockStore}; + +use crate::{ + config, + storage::{ConnectionPool, Store}, +}; + +/// Task running a consensus validator for the main node. +/// Main node is currently the only leader of the consensus - i.e. it proposes all the +/// L2 blocks (generated by `Statekeeper`). +pub async fn run_main_node( + ctx: &ctx::Ctx, + cfg: ConsensusConfig, + secrets: ConsensusSecrets, + pool: ConnectionPool, +) -> anyhow::Result<()> { + let validator_key = config::validator_key(&secrets) + .context("validator_key")? + .context("missing validator_key")?; + + let attester = config::attester_key(&secrets) + .context("attester_key")? + .map(|key| Attester { key }); + + tracing::debug!(is_attester = attester.is_some(), "main node attester mode"); + + scope::run!(&ctx, |ctx, s| async { + if let Some(spec) = &cfg.genesis_spec { + let spec = config::GenesisSpec::parse(spec).context("GenesisSpec::parse()")?; + + pool.connection(ctx) + .await + .wrap("connection()")? + .adjust_genesis(ctx, &spec) + .await + .wrap("adjust_genesis()")?; + } + + // The main node doesn't have a payload queue as it produces all the L2 blocks itself. 
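Aside on the store/runner split that the hunk below wires up: every `Store::new`/`BlockStore::new`/`BatchStore::new` call hands back a runner that has to be spawned as a background task, and the enclosing scope tears all of them down together when the main future exits. A rough analogy in plain tokio, not the zksync_concurrency scope API used here; every name in the sketch is illustrative:

    use tokio::task::JoinSet;

    // Hypothetical stand-in for a store runner's main loop.
    async fn run_forever(name: &'static str) -> anyhow::Result<()> {
        loop {
            // ... persist certificates, refresh watch channels, etc.
            let _ = name;
            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        }
    }

    async fn scope_like() -> anyhow::Result<()> {
        let mut background: JoinSet<anyhow::Result<()>> = JoinSet::new();
        // Equivalent of `s.spawn_bg(runner.run(ctx))`: background tasks live
        // exactly as long as the "scope" that spawned them.
        background.spawn(run_forever("block_store_runner"));
        background.spawn(run_forever("batch_store_runner"));
        // Equivalent of the final `executor.run(ctx).await`: when this future
        // finishes, dropping the JoinSet aborts both runners.
        tokio::time::sleep(std::time::Duration::from_secs(5)).await;
        Ok(())
    }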
+        let (store, runner) = Store::new(ctx, pool, None).await.wrap("Store::new()")?;
+        s.spawn_bg(runner.run(ctx));
+
+        let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone()))
+            .await
+            .wrap("BlockStore::new()")?;
+        s.spawn_bg(runner.run(ctx));
+
+        anyhow::ensure!(
+            block_store.genesis().leader_selection
+                == validator::LeaderSelectionMode::Sticky(validator_key.public()),
+            "unsupported leader selection mode - main node has to be the leader"
+        );
+
+        let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone()))
+            .await
+            .wrap("BatchStore::new()")?;
+        s.spawn_bg(runner.run(ctx));
+
+        let executor = executor::Executor {
+            config: config::executor(&cfg, &secrets)?,
+            block_store,
+            batch_store,
+            validator: Some(executor::Validator {
+                key: validator_key,
+                replica_store: Box::new(store.clone()),
+                payload_manager: Box::new(store.clone()),
+            }),
+            attester,
+        };
+        executor.run(ctx).await
+    })
+    .await
+}
diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs
new file mode 100644
index 000000000000..7bff2c4bcf0e
--- /dev/null
+++ b/core/node/consensus/src/storage/connection.rs
@@ -0,0 +1,421 @@
+use anyhow::Context as _;
+use zksync_concurrency::{ctx, error::Wrap as _, time};
+use zksync_consensus_roles::{attester, validator};
+use zksync_consensus_storage::{self as storage, BatchStoreState};
+use zksync_dal::{consensus_dal::Payload, Core, CoreDal, DalError};
+use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo;
+use zksync_node_sync::{fetcher::IoCursorExt as _, ActionQueueSender, SyncState};
+use zksync_state_keeper::io::common::IoCursor;
+use zksync_types::{commitment::L1BatchWithMetadata, L1BatchNumber};
+
+use super::{InsertCertificateError, PayloadQueue};
+use crate::config;
+
+/// Context-aware `zksync_dal::ConnectionPool` wrapper.
+#[derive(Debug, Clone)]
+pub(crate) struct ConnectionPool(pub(crate) zksync_dal::ConnectionPool<Core>);
+
+impl ConnectionPool {
+    /// Wrapper for `connection_tagged()`.
+    pub(crate) async fn connection<'a>(&'a self, ctx: &ctx::Ctx) -> ctx::Result<Connection<'a>> {
+        Ok(Connection(
+            ctx.wait(self.0.connection_tagged("consensus"))
+                .await?
+                .map_err(DalError::generalize)?,
+        ))
+    }
+
+    /// Waits for the `number` L2 block.
+    pub async fn wait_for_payload(
+        &self,
+        ctx: &ctx::Ctx,
+        number: validator::BlockNumber,
+    ) -> ctx::Result<Payload> {
+        const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50);
+        loop {
+            if let Some(payload) = self
+                .connection(ctx)
+                .await
+                .wrap("connection()")?
+                .payload(ctx, number)
+                .await
+                .with_wrap(|| format!("payload({number})"))?
+            {
+                return Ok(payload);
+            }
+            ctx.sleep(POLL_INTERVAL).await?;
+        }
+    }
+}
+
+/// Context-aware `zksync_dal::Connection` wrapper.
+pub(crate) struct Connection<'a>(pub(crate) zksync_dal::Connection<'a, Core>);
+
+impl<'a> Connection<'a> {
+    /// Wrapper for `start_transaction()`.
+    pub async fn start_transaction<'b, 'c: 'b>(
+        &'c mut self,
+        ctx: &ctx::Ctx,
+    ) -> ctx::Result<Connection<'b>> {
+        Ok(Connection(
+            ctx.wait(self.0.start_transaction())
+                .await?
+                .context("sqlx")?,
+        ))
+    }
+
+    /// Wrapper for `commit()`.
+    pub async fn commit(self, ctx: &ctx::Ctx) -> ctx::Result<()> {
+        Ok(ctx.wait(self.0.commit()).await?.context("sqlx")?)
+    }
+
+    /// Wrapper for `consensus_dal().block_payload()`.
+    pub async fn payload(
+        &mut self,
+        ctx: &ctx::Ctx,
+        number: validator::BlockNumber,
+    ) -> ctx::Result<Option<Payload>> {
+        Ok(ctx
+            .wait(self.0.consensus_dal().block_payload(number))
+            .await?
+            .map_err(DalError::generalize)?)
+ } + + /// Wrapper for `consensus_dal().block_payloads()`. + pub async fn payloads( + &mut self, + ctx: &ctx::Ctx, + numbers: std::ops::Range, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.consensus_dal().block_payloads(numbers)) + .await? + .map_err(DalError::generalize)?) + } + + /// Wrapper for `consensus_dal().block_certificate()`. + pub async fn block_certificate( + &mut self, + ctx: &ctx::Ctx, + number: validator::BlockNumber, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.consensus_dal().block_certificate(number)) + .await??) + } + + /// Wrapper for `consensus_dal().insert_block_certificate()`. + pub async fn insert_block_certificate( + &mut self, + ctx: &ctx::Ctx, + cert: &validator::CommitQC, + ) -> Result<(), InsertCertificateError> { + Ok(ctx + .wait(self.0.consensus_dal().insert_block_certificate(cert)) + .await??) + } + + /// Wrapper for `consensus_dal().insert_batch_certificate()`. + pub async fn insert_batch_certificate( + &mut self, + ctx: &ctx::Ctx, + cert: &attester::BatchQC, + ) -> Result<(), InsertCertificateError> { + use crate::storage::consensus_dal::InsertCertificateError as E; + + let l1_batch_number = L1BatchNumber(cert.message.number.0 as u32); + + let Some(l1_batch) = self + .0 + .blocks_dal() + .get_l1_batch_metadata(l1_batch_number) + .await + .map_err(E::Dal)? + else { + return Err(E::MissingPayload.into()); + }; + + let l1_batch_info = StoredBatchInfo::from(&l1_batch); + + if l1_batch_info.hash().0 != *cert.message.hash.0.as_bytes() { + return Err(E::PayloadMismatch.into()); + } + + Ok(ctx + .wait(self.0.consensus_dal().insert_batch_certificate(cert)) + .await??) + } + + /// Wrapper for `consensus_dal().replica_state()`. + pub async fn replica_state(&mut self, ctx: &ctx::Ctx) -> ctx::Result { + Ok(ctx + .wait(self.0.consensus_dal().replica_state()) + .await? + .map_err(DalError::generalize)?) + } + + /// Wrapper for `consensus_dal().set_replica_state()`. + pub async fn set_replica_state( + &mut self, + ctx: &ctx::Ctx, + state: &storage::ReplicaState, + ) -> ctx::Result<()> { + Ok(ctx + .wait(self.0.consensus_dal().set_replica_state(state)) + .await? + .context("sqlx")?) + } + + /// Wrapper for `blocks_dal().get_l1_batch_metadata()`. + pub async fn batch( + &mut self, + ctx: &ctx::Ctx, + number: L1BatchNumber, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.blocks_dal().get_l1_batch_metadata(number)) + .await? + .context("get_l1_batch_metadata()")?) + } + + /// Wrapper for `FetcherCursor::new()`. + pub async fn new_payload_queue( + &mut self, + ctx: &ctx::Ctx, + actions: ActionQueueSender, + sync_state: SyncState, + ) -> ctx::Result { + Ok(PayloadQueue { + inner: ctx.wait(IoCursor::for_fetcher(&mut self.0)).await??, + actions, + sync_state, + }) + } + + /// Wrapper for `consensus_dal().genesis()`. + pub async fn genesis(&mut self, ctx: &ctx::Ctx) -> ctx::Result> { + Ok(ctx + .wait(self.0.consensus_dal().genesis()) + .await? + .map_err(DalError::generalize)?) + } + + /// Wrapper for `consensus_dal().try_update_genesis()`. + pub async fn try_update_genesis( + &mut self, + ctx: &ctx::Ctx, + genesis: &validator::Genesis, + ) -> ctx::Result<()> { + Ok(ctx + .wait(self.0.consensus_dal().try_update_genesis(genesis)) + .await??) + } + + /// Wrapper for `consensus_dal().next_block()`. + async fn next_block(&mut self, ctx: &ctx::Ctx) -> ctx::Result { + Ok(ctx.wait(self.0.consensus_dal().next_block()).await??) + } + + /// Wrapper for `consensus_dal().block_certificates_range()`. 
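The `block_certificates_range()` wrapper below returns a `BlockStoreState`-style range: the first block still in storage plus the last certificate, with `next()` derived from them. A toy version of that bookkeeping, with a simplified stand-in struct (the real type lives in zksync_consensus_storage):

    // Toy stand-in: `first` is the first block we still have, `last` is the
    // last block with a persisted certificate.
    #[derive(Debug, PartialEq)]
    struct Range {
        first: u64,
        last: Option<u64>, // None == no certificates stored yet
    }

    impl Range {
        // The next block number a certificate is expected for.
        fn next(&self) -> u64 {
            match self.last {
                Some(last) => last + 1,
                None => self.first,
            }
        }
    }

    fn main() {
        assert_eq!(Range { first: 0, last: None }.next(), 0);
        assert_eq!(Range { first: 0, last: Some(41) }.next(), 42);
    }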
+ pub(crate) async fn block_certificates_range( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result { + Ok(ctx + .wait(self.0.consensus_dal().block_certificates_range()) + .await??) + } + + /// (Re)initializes consensus genesis to start at the last L2 block in storage. + /// Noop if `spec` matches the current genesis. + pub(crate) async fn adjust_genesis( + &mut self, + ctx: &ctx::Ctx, + spec: &config::GenesisSpec, + ) -> ctx::Result<()> { + let mut txn = self + .start_transaction(ctx) + .await + .wrap("start_transaction()")?; + + let old = txn.genesis(ctx).await.wrap("genesis()")?; + if let Some(old) = &old { + if &config::GenesisSpec::from_genesis(old) == spec { + // Hard fork is not needed. + return Ok(()); + } + } + + tracing::info!("Performing a hard fork of consensus."); + let genesis = validator::GenesisRaw { + chain_id: spec.chain_id, + fork_number: old + .as_ref() + .map_or(validator::ForkNumber(0), |old| old.fork_number.next()), + first_block: txn.next_block(ctx).await.context("next_block()")?, + protocol_version: spec.protocol_version, + validators: spec.validators.clone(), + attesters: spec.attesters.clone(), + leader_selection: spec.leader_selection.clone(), + } + .with_hash(); + + txn.try_update_genesis(ctx, &genesis) + .await + .wrap("try_update_genesis()")?; + txn.commit(ctx).await.wrap("commit()")?; + Ok(()) + } + + /// Fetches a block from storage. + pub(crate) async fn block( + &mut self, + ctx: &ctx::Ctx, + number: validator::BlockNumber, + ) -> ctx::Result> { + let Some(justification) = self + .block_certificate(ctx, number) + .await + .wrap("block_certificate()")? + else { + return Ok(None); + }; + + let payload = self + .payload(ctx, number) + .await + .wrap("payload()")? + .context("L2 block disappeared from storage")?; + + Ok(Some(validator::FinalBlock { + payload: payload.encode(), + justification, + })) + } + + /// Wrapper for `blocks_dal().get_sealed_l1_batch_number()`. + pub async fn get_last_batch_number( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.blocks_dal().get_sealed_l1_batch_number()) + .await? + .context("get_sealed_l1_batch_number()")? + .map(|nr| attester::BatchNumber(nr.0 as u64))) + } + + /// Wrapper for `consensus_dal().get_last_batch_certificate_number()`. + pub async fn get_last_batch_certificate_number( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.consensus_dal().get_last_batch_certificate_number()) + .await? + .context("get_last_batch_certificate_number()")?) + } + + /// Wrapper for `consensus_dal().batch_certificate()`. + pub async fn batch_certificate( + &mut self, + ctx: &ctx::Ctx, + number: attester::BatchNumber, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.consensus_dal().batch_certificate(number)) + .await? + .context("batch_certificate()")?) + } + + /// Wrapper for `blocks_dal().get_l2_block_range_of_l1_batch()`. + pub async fn get_l2_block_range_of_l1_batch( + &mut self, + ctx: &ctx::Ctx, + number: attester::BatchNumber, + ) -> ctx::Result> { + let number = L1BatchNumber(number.0.try_into().context("number")?); + + let range = ctx + .wait(self.0.blocks_dal().get_l2_block_range_of_l1_batch(number)) + .await? + .context("get_l2_block_range_of_l1_batch()")?; + + Ok(range.map(|(min, max)| { + let min = validator::BlockNumber(min.0 as u64); + let max = validator::BlockNumber(max.0 as u64); + (min, max) + })) + } + + /// Construct the [attester::SyncBatch] for a given batch number. 
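The wrappers above constantly convert between the node's `L1BatchNumber` (a `u32` newtype) and the consensus-side `attester::BatchNumber`/`validator::BlockNumber` (`u64` newtypes), which is why both `as u64` casts and fallible `try_into()` calls appear. A minimal sketch of the two directions, using simplified stand-in newtypes rather than the real types:

    // Simplified stand-ins for the real newtypes (both are tuple structs there too).
    struct L1BatchNumber(u32);
    struct BatchNumber(u64);

    fn widen(n: L1BatchNumber) -> BatchNumber {
        // u32 -> u64 can never fail, hence the plain cast (`nr.0 as u64` above).
        BatchNumber(u64::from(n.0))
    }

    fn narrow(n: BatchNumber) -> anyhow::Result<L1BatchNumber> {
        // u64 -> u32 can overflow, hence the fallible conversion with context.
        Ok(L1BatchNumber(u32::try_from(n.0)?))
    }

    fn main() -> anyhow::Result<()> {
        assert_eq!(widen(L1BatchNumber(23)).0, 23);
        assert!(narrow(BatchNumber(u64::MAX)).is_err());
        Ok(())
    }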
+ pub async fn get_batch( + &mut self, + ctx: &ctx::Ctx, + number: attester::BatchNumber, + ) -> ctx::Result> { + let Some((min, max)) = self + .get_l2_block_range_of_l1_batch(ctx, number) + .await + .context("get_l2_block_range_of_l1_batch()")? + else { + return Ok(None); + }; + + let payloads = self.payloads(ctx, min..max).await.wrap("payloads()")?; + let payloads = payloads.into_iter().map(|p| p.encode()).collect(); + + // TODO: Fill out the proof when we have the stateless L1 batch validation story finished. + // It is supposed to be a Merkle proof that the rolling hash of the batch has been included + // in the L1 system contract state tree. It is *not* the Ethereum state root hash, so producing + // it can be done without an L1 client, which is only required for validation. + let batch = attester::SyncBatch { + number, + payloads, + proof: Vec::new(), + }; + + Ok(Some(batch)) + } + + /// Construct the [storage::BatchStoreState] which contains the earliest batch and the last available [attester::SyncBatch]. + pub async fn batches_range(&mut self, ctx: &ctx::Ctx) -> ctx::Result { + let first = self + .0 + .blocks_dal() + .get_earliest_l1_batch_number() + .await + .context("get_earliest_l1_batch_number()")?; + + let first = if first.is_some() { + first + } else { + self.0 + .snapshot_recovery_dal() + .get_applied_snapshot_status() + .await + .context("get_earliest_l1_batch_number()")? + .map(|s| s.l1_batch_number) + }; + + // TODO: In the future when we start filling in the `SyncBatch::proof` field, + // we can only run `get_batch` expecting `Some` result on numbers where the + // L1 state root hash is already available, so that we can produce some + // Merkle proof that the rolling hash of the L2 blocks in the batch has + // been included in the L1 state tree. At that point we probably can't + // call `get_last_batch_number` here, but something that indicates that + // the hashes/commitments on the L1 batch are ready and the thing has + // been included in L1; that potentially requires an API client as well. + let last = self + .get_last_batch_number(ctx) + .await + .context("get_last_batch_number()")?; + + Ok(BatchStoreState { + first: first + .map(|n| attester::BatchNumber(n.0 as u64)) + .unwrap_or(attester::BatchNumber(0)), + last, + }) + } +} diff --git a/core/node/consensus/src/storage/mod.rs b/core/node/consensus/src/storage/mod.rs index bc8a0b8b8409..6660f75332bc 100644 --- a/core/node/consensus/src/storage/mod.rs +++ b/core/node/consensus/src/storage/mod.rs @@ -1,350 +1,49 @@ //! Storage implementation based on DAL. -use std::sync::Arc; -use anyhow::Context as _; -use zksync_concurrency::{ctx, error::Wrap as _, sync, time}; -use zksync_consensus_bft::PayloadManager; +use zksync_concurrency::ctx; use zksync_consensus_roles::validator; -use zksync_consensus_storage as storage; -use zksync_dal::{consensus_dal::Payload, Core, CoreDal, DalError}; +use zksync_dal::consensus_dal; use zksync_node_sync::{ - fetcher::{FetchedBlock, FetchedTransaction, IoCursorExt as _}, + fetcher::{FetchedBlock, IoCursorExt as _}, sync_action::ActionQueueSender, SyncState, }; use zksync_state_keeper::io::common::IoCursor; -use zksync_types::{commitment::L1BatchWithMetadata, L1BatchNumber, L2BlockNumber}; -use super::config; +mod connection; +mod store; + +pub(crate) use connection::*; +pub(crate) use store::*; #[cfg(test)] pub(crate) mod testonly; -/// Context-aware `zksync_dal::ConnectionPool` wrapper. 
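The point of routing every DB call through `ctx.wait(...)` in these wrappers is that each query is raced against context cancellation, so a node shutdown interrupts even a slow query. A rough sketch of that idea using `tokio_util::sync::CancellationToken` in place of the real `ctx::Ctx` (an analogy under stated assumptions, not the zksync_concurrency API):

    use tokio_util::sync::CancellationToken;

    // Sketch of `ctx.wait(fut)`: resolve the future, unless the context is
    // cancelled first, in which case surface the cancellation as an error.
    async fn wait<T>(
        cancel: &CancellationToken,
        fut: impl std::future::Future<Output = T>,
    ) -> anyhow::Result<T> {
        tokio::select! {
            _ = cancel.cancelled() => anyhow::bail!("canceled"),
            out = fut => Ok(out),
        }
    }

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        let cancel = CancellationToken::new();
        // A stand-in for a DAL query.
        let out = wait(&cancel, async { 42u32 }).await?;
        assert_eq!(out, 42);
        Ok(())
    }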
-#[derive(Debug, Clone)] -pub(super) struct ConnectionPool(pub(super) zksync_dal::ConnectionPool); - -impl ConnectionPool { - /// Wrapper for `connection_tagged()`. - pub(super) async fn connection<'a>(&'a self, ctx: &ctx::Ctx) -> ctx::Result> { - Ok(Connection( - ctx.wait(self.0.connection_tagged("consensus")) - .await? - .map_err(DalError::generalize)?, - )) - } - - /// Waits for the `number` L2 block. - pub async fn wait_for_payload( - &self, - ctx: &ctx::Ctx, - number: validator::BlockNumber, - ) -> ctx::Result { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); - loop { - if let Some(payload) = self - .connection(ctx) - .await - .wrap("connection()")? - .payload(ctx, number) - .await - .wrap("payload()")? - { - return Ok(payload); - } - ctx.sleep(POLL_INTERVAL).await?; - } - } -} - -/// Context-aware `zksync_dal::Connection` wrapper. -pub(super) struct Connection<'a>(pub(super) zksync_dal::Connection<'a, Core>); - -impl<'a> Connection<'a> { - /// Wrapper for `start_transaction()`. - pub async fn start_transaction<'b, 'c: 'b>( - &'c mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - Ok(Connection( - ctx.wait(self.0.start_transaction()) - .await? - .context("sqlx")?, - )) - } - - /// Wrapper for `commit()`. - pub async fn commit(self, ctx: &ctx::Ctx) -> ctx::Result<()> { - Ok(ctx.wait(self.0.commit()).await?.context("sqlx")?) - } - - /// Wrapper for `consensus_dal().block_range()`. - pub async fn block_range( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().block_range()) - .await? - .context("sqlx")?) - } - - /// Wrapper for `consensus_dal().block_payload()`. - pub async fn payload( - &mut self, - ctx: &ctx::Ctx, - number: validator::BlockNumber, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().block_payload(number)) - .await? - .map_err(DalError::generalize)?) - } - - /// Wrapper for `consensus_dal().block_payloads()`. - pub async fn payloads( - &mut self, - ctx: &ctx::Ctx, - numbers: std::ops::Range, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().block_payloads(numbers)) - .await? - .map_err(DalError::generalize)?) - } - - /// Wrapper for `consensus_dal().first_certificate()`. - pub async fn first_certificate( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().first_certificate()) - .await? - .map_err(DalError::generalize)?) - } - - /// Wrapper for `consensus_dal().last_certificate()`. - pub async fn last_certificate( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().last_certificate()) - .await? - .map_err(DalError::generalize)?) - } - - /// Wrapper for `consensus_dal().certificate()`. - pub async fn certificate( - &mut self, - ctx: &ctx::Ctx, - number: validator::BlockNumber, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().certificate(number)) - .await? - .map_err(DalError::generalize)?) - } - - /// Wrapper for `consensus_dal().insert_certificate()`. - pub async fn insert_certificate( - &mut self, - ctx: &ctx::Ctx, - cert: &validator::CommitQC, - ) -> ctx::Result<()> { - Ok(ctx - .wait(self.0.consensus_dal().insert_certificate(cert)) - .await??) - } - - /// Wrapper for `consensus_dal().replica_state()`. - pub async fn replica_state(&mut self, ctx: &ctx::Ctx) -> ctx::Result { - Ok(ctx - .wait(self.0.consensus_dal().replica_state()) - .await? - .map_err(DalError::generalize)?) - } - - /// Wrapper for `consensus_dal().set_replica_state()`. 
- pub async fn set_replica_state( - &mut self, - ctx: &ctx::Ctx, - state: &storage::ReplicaState, - ) -> ctx::Result<()> { - Ok(ctx - .wait(self.0.consensus_dal().set_replica_state(state)) - .await? - .context("sqlx")?) - } - - /// Wrapper for `consensus_dal().get_l1_batch_metadata()`. - pub async fn batch( - &mut self, - ctx: &ctx::Ctx, - number: L1BatchNumber, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.blocks_dal().get_l1_batch_metadata(number)) - .await? - .context("get_l1_batch_metadata()")?) - } - - /// Wrapper for `FetcherCursor::new()`. - pub async fn new_payload_queue( - &mut self, - ctx: &ctx::Ctx, - actions: ActionQueueSender, - sync_state: SyncState, - ) -> ctx::Result { - Ok(PayloadQueue { - inner: ctx.wait(IoCursor::for_fetcher(&mut self.0)).await??, - actions, - sync_state, - }) - } - - pub async fn genesis(&mut self, ctx: &ctx::Ctx) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().genesis()) - .await? - .map_err(DalError::generalize)?) - } - - pub async fn try_update_genesis( - &mut self, - ctx: &ctx::Ctx, - genesis: &validator::Genesis, - ) -> ctx::Result<()> { - Ok(ctx - .wait(self.0.consensus_dal().try_update_genesis(genesis)) - .await??) - } - - /// Fetches and verifies consistency of certificates in storage. - async fn certificates_range( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result { - // Fetch the range of L2 blocks in storage. - let block_range = self.block_range(ctx).await.context("block_range")?; - - // Fetch the range of certificates in storage. - let genesis = self - .genesis(ctx) - .await - .wrap("genesis()")? - .context("genesis missing")?; - let first_expected_cert = genesis.first_block.max(block_range.start); - let last_cert = self - .last_certificate(ctx) - .await - .wrap("last_certificate()")?; - let next_expected_cert = last_cert - .as_ref() - .map_or(first_expected_cert, |cert| cert.header().number.next()); - - // Check that the first certificate in storage has the expected L2 block number. - if let Some(got) = self - .first_certificate(ctx) - .await - .wrap("first_certificate()")? - { - if got.header().number != first_expected_cert { - return Err(anyhow::format_err!( - "inconsistent storage: certificates should start at {first_expected_cert}, while they start at {}", - got.header().number, - ).into()); - } - } - - // Check that the node has all the blocks before the next expected certificate, because - // the node needs to know the state of the chain up to block `X` to process block `X+1`. - if block_range.end < next_expected_cert { - return Err(anyhow::format_err!("inconsistent storage: cannot start consensus for L2 block {next_expected_cert}, because earlier blocks are missing").into()); - } - Ok(storage::BlockStoreState { - first: first_expected_cert, - last: last_cert, - }) - } - - /// (Re)initializes consensus genesis to start at the last L2 block in storage. - /// Noop if `spec` matches the current genesis. - pub(super) async fn adjust_genesis( - &mut self, - ctx: &ctx::Ctx, - spec: &config::GenesisSpec, - ) -> ctx::Result<()> { - let block_range = self.block_range(ctx).await.wrap("block_range()")?; - let mut txn = self - .start_transaction(ctx) - .await - .wrap("start_transaction()")?; - let old = txn.genesis(ctx).await.wrap("genesis()")?; - if let Some(old) = &old { - if &config::GenesisSpec::from_genesis(old) == spec { - // Hard fork is not needed. 
- return Ok(()); - } - } - tracing::info!("Performing a hard fork of consensus."); - let genesis = validator::GenesisRaw { - chain_id: spec.chain_id, - fork_number: old - .as_ref() - .map_or(validator::ForkNumber(0), |old| old.fork_number.next()), - first_block: block_range.end, - - protocol_version: spec.protocol_version, - committee: spec.validators.clone(), - leader_selection: spec.leader_selection.clone(), - } - .with_hash(); - txn.try_update_genesis(ctx, &genesis) - .await - .wrap("try_update_genesis()")?; - txn.commit(ctx).await.wrap("commit()")?; - Ok(()) - } - - pub(super) async fn block( - &mut self, - ctx: &ctx::Ctx, - number: validator::BlockNumber, - ) -> ctx::Result> { - let Some(justification) = self.certificate(ctx, number).await.wrap("certificate()")? else { - return Ok(None); - }; - let payload = self - .payload(ctx, number) - .await - .wrap("payload()")? - .context("L2 block disappeared from storage")?; - Ok(Some(validator::FinalBlock { - payload: payload.encode(), - justification, - })) - } +#[derive(thiserror::Error, Debug)] +pub enum InsertCertificateError { + #[error(transparent)] + Canceled(#[from] ctx::Canceled), + #[error(transparent)] + Inner(#[from] consensus_dal::InsertCertificateError), } #[derive(Debug)] -pub(super) struct PayloadQueue { +pub(crate) struct PayloadQueue { inner: IoCursor, actions: ActionQueueSender, sync_state: SyncState, } impl PayloadQueue { - pub(super) fn next(&self) -> validator::BlockNumber { + pub(crate) fn next(&self) -> validator::BlockNumber { validator::BlockNumber(self.inner.next_l2_block.0.into()) } /// Advances the cursor by converting the block into actions and pushing them /// to the actions queue. - /// Does nothing and returns Ok() if the block has been already processed. + /// Does nothing and returns `Ok(())` if the block has been already processed. /// Returns an error if a block with an earlier block number was expected. - pub(super) async fn send(&mut self, block: FetchedBlock) -> anyhow::Result<()> { + pub(crate) async fn send(&mut self, block: FetchedBlock) -> anyhow::Result<()> { let want = self.inner.next_l2_block; // Some blocks are missing. if block.number > want { @@ -354,253 +53,7 @@ impl PayloadQueue { if block.number < want { return Ok(()); } - self.actions.push_actions(self.inner.advance(block)).await; - Ok(()) - } -} - -fn to_fetched_block( - number: validator::BlockNumber, - payload: &validator::Payload, -) -> anyhow::Result { - let number = L2BlockNumber( - number - .0 - .try_into() - .context("Integer overflow converting block number")?, - ); - let payload = Payload::decode(payload).context("Payload::decode()")?; - Ok(FetchedBlock { - number, - l1_batch_number: payload.l1_batch_number, - last_in_batch: payload.last_in_batch, - protocol_version: payload.protocol_version, - timestamp: payload.timestamp, - reference_hash: Some(payload.hash), - l1_gas_price: payload.l1_gas_price, - l2_fair_gas_price: payload.l2_fair_gas_price, - fair_pubdata_price: payload.fair_pubdata_price, - virtual_blocks: payload.virtual_blocks, - operator_address: payload.operator_address, - transactions: payload - .transactions - .into_iter() - .map(FetchedTransaction::new) - .collect(), - }) -} - -/// Wrapper of `ConnectionPool` implementing `ReplicaStore`, `PayloadManager` and -/// `PersistentBlockStore`. -#[derive(Clone, Debug)] -pub(super) struct Store { - pub(super) pool: ConnectionPool, - payloads: Arc>>, - certificates: ctx::channel::UnboundedSender, - persisted: sync::watch::Receiver, -} - -/// Background task of the `Store`. 
-pub struct StoreRunner { - pool: ConnectionPool, - persisted: sync::watch::Sender, - certificates: ctx::channel::UnboundedReceiver, -} - -impl Store { - pub(super) async fn new( - ctx: &ctx::Ctx, - pool: ConnectionPool, - payload_queue: Option, - ) -> ctx::Result<(Store, StoreRunner)> { - let persisted = pool - .connection(ctx) - .await - .wrap("connection()")? - .certificates_range(ctx) - .await - .wrap("certificates_range()")?; - let persisted = sync::watch::channel(persisted).0; - let (certs_send, certs_recv) = ctx::channel::unbounded(); - Ok(( - Store { - pool: pool.clone(), - certificates: certs_send, - payloads: Arc::new(sync::Mutex::new(payload_queue)), - persisted: persisted.subscribe(), - }, - StoreRunner { - pool, - persisted, - certificates: certs_recv, - }, - )) - } -} - -impl StoreRunner { - pub async fn run(mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> { - let res = async { - loop { - let cert = self.certificates.recv(ctx).await?; - self.pool - .wait_for_payload(ctx, cert.header().number) - .await - .wrap("wait_for_payload()")?; - self.pool - .connection(ctx) - .await - .wrap("connection()")? - .insert_certificate(ctx, &cert) - .await - .wrap("insert_certificate()")?; - self.persisted.send_modify(|p| p.last = Some(cert)); - } - } - .await; - match res { - Err(ctx::Error::Canceled(_)) | Ok(()) => Ok(()), - Err(ctx::Error::Internal(err)) => Err(err), - } - } -} - -#[async_trait::async_trait] -impl storage::PersistentBlockStore for Store { - async fn genesis(&self, ctx: &ctx::Ctx) -> ctx::Result { - Ok(self - .pool - .connection(ctx) - .await - .wrap("connection")? - .genesis(ctx) - .await? - .context("not found")?) - } - - fn persisted(&self) -> sync::watch::Receiver { - self.persisted.clone() - } - - async fn block( - &self, - ctx: &ctx::Ctx, - number: validator::BlockNumber, - ) -> ctx::Result { - Ok(self - .pool - .connection(ctx) - .await - .wrap("connection")? - .block(ctx, number) - .await? - .context("not found")?) - } - - /// If actions queue is set (and the block has not been stored yet), - /// the block will be translated into a sequence of actions. - /// The received actions should be fed - /// to `ExternalIO`, so that `StateKeeper` will store the corresponding L2 block in the db. - /// - /// `store_next_block()` call will wait synchronously for the L2 block. - /// Once the L2 block is observed in storage, `store_next_block()` will store a cert for this - /// L2 block. - async fn queue_next_block( - &self, - ctx: &ctx::Ctx, - block: validator::FinalBlock, - ) -> ctx::Result<()> { - let mut payloads = sync::lock(ctx, &self.payloads).await?.into_async(); - if let Some(payloads) = &mut *payloads { - payloads - .send(to_fetched_block(block.number(), &block.payload).context("to_fetched_block")?) - .await - .context("payload_queue.send()")?; - } - self.certificates.send(block.justification); - Ok(()) - } -} - -#[async_trait::async_trait] -impl storage::ReplicaStore for Store { - async fn state(&self, ctx: &ctx::Ctx) -> ctx::Result { - self.pool - .connection(ctx) - .await - .wrap("connection()")? - .replica_state(ctx) - .await - .wrap("replica_state()") - } - - async fn set_state(&self, ctx: &ctx::Ctx, state: &storage::ReplicaState) -> ctx::Result<()> { - self.pool - .connection(ctx) - .await - .wrap("connection()")? 
- .set_replica_state(ctx, state) - .await - .wrap("set_replica_state()") - } -} - -#[async_trait::async_trait] -impl PayloadManager for Store { - /// Currently (for the main node) proposing is implemented as just converting an L2 block from db (without a cert) into a payload. - async fn propose( - &self, - ctx: &ctx::Ctx, - block_number: validator::BlockNumber, - ) -> ctx::Result { - const LARGE_PAYLOAD_SIZE: usize = 1 << 20; - let payload = self.pool.wait_for_payload(ctx, block_number).await?; - let encoded_payload = payload.encode(); - if encoded_payload.0.len() > LARGE_PAYLOAD_SIZE { - tracing::warn!( - "large payload ({}B) with {} transactions", - encoded_payload.0.len(), - payload.transactions.len() - ); - } - Ok(encoded_payload) - } - - /// Verify that `payload` is a correct proposal for the block `block_number`. - /// * for the main node it checks whether the same block is already present in storage. - /// * for the EN validator - /// * if the block with this number was already applied, it checks that it was the - /// same block. It should always be true, because main node is the only proposer and - /// to propose a different block a hard fork is needed. - /// * otherwise, EN attempts to apply the received block. If the block was incorrect - /// the statekeeper is expected to crash the whole EN. Otherwise OK is returned. - async fn verify( - &self, - ctx: &ctx::Ctx, - block_number: validator::BlockNumber, - payload: &validator::Payload, - ) -> ctx::Result<()> { - let mut payloads = sync::lock(ctx, &self.payloads).await?.into_async(); - if let Some(payloads) = &mut *payloads { - let block = to_fetched_block(block_number, payload).context("to_fetched_block")?; - let n = block.number; - payloads.send(block).await.context("payload_queue.send()")?; - // Wait for the block to be processed, without waiting for it to be stored. - // TODO(BFT-459): this is not ideal, because we don't check here whether the - // processed block is the same as `payload`. It will work correctly - // with the current implementation of EN, but we should make it more - // precise when block reverting support is implemented. 
-            ctx.wait(payloads.sync_state.wait_for_local_block(n))
-                .await?;
-        } else {
-            let want = self.pool.wait_for_payload(ctx, block_number).await?;
-            let got = Payload::decode(payload).context("Payload::decode(got)")?;
-            if got != want {
-                return Err(
-                    anyhow::format_err!("unexpected payload: got {got:?} want {want:?}").into(),
-                );
-            }
-        }
+        self.actions.push_actions(self.inner.advance(block)).await?;
         Ok(())
     }
 }
diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs
new file mode 100644
index 000000000000..b1dc3e0b60c2
--- /dev/null
+++ b/core/node/consensus/src/storage/store.rs
@@ -0,0 +1,592 @@
+use std::sync::Arc;
+
+use anyhow::Context as _;
+use zksync_concurrency::{ctx, error::Wrap as _, scope, sync, time};
+use zksync_consensus_bft::PayloadManager;
+use zksync_consensus_crypto::keccak256::Keccak256;
+use zksync_consensus_roles::{attester, validator};
+use zksync_consensus_storage::{self as storage, BatchStoreState};
+use zksync_dal::consensus_dal::{self, Payload};
+use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo;
+use zksync_node_sync::fetcher::{FetchedBlock, FetchedTransaction};
+use zksync_types::{L1BatchNumber, L2BlockNumber};
+
+use super::{Connection, PayloadQueue};
+use crate::storage::{ConnectionPool, InsertCertificateError};
+
+fn to_fetched_block(
+    number: validator::BlockNumber,
+    payload: &validator::Payload,
+) -> anyhow::Result<FetchedBlock> {
+    let number = L2BlockNumber(
+        number
+            .0
+            .try_into()
+            .context("Integer overflow converting block number")?,
+    );
+    let payload = Payload::decode(payload).context("Payload::decode()")?;
+    Ok(FetchedBlock {
+        number,
+        l1_batch_number: payload.l1_batch_number,
+        last_in_batch: payload.last_in_batch,
+        protocol_version: payload.protocol_version,
+        timestamp: payload.timestamp,
+        reference_hash: Some(payload.hash),
+        l1_gas_price: payload.l1_gas_price,
+        l2_fair_gas_price: payload.l2_fair_gas_price,
+        fair_pubdata_price: payload.fair_pubdata_price,
+        virtual_blocks: payload.virtual_blocks,
+        operator_address: payload.operator_address,
+        transactions: payload
+            .transactions
+            .into_iter()
+            .map(FetchedTransaction::new)
+            .collect(),
+    })
+}
+
+/// Wrapper of `ConnectionPool` implementing `ReplicaStore`, `PayloadManager`,
+/// `PersistentBlockStore` and `PersistentBatchStore`.
+///
+/// Contains queues to save Quorum Certificates received over gossip to the store
+/// as and when the payload they are over becomes available.
+#[derive(Clone, Debug)]
+pub(crate) struct Store {
+    pub(super) pool: ConnectionPool,
+    /// Action queue to fetch/store L2 block payloads
+    block_payloads: Arc<sync::Mutex<Option<PayloadQueue>>>,
+    /// L2 block QCs received from consensus
+    block_certificates: ctx::channel::UnboundedSender<validator::CommitQC>,
+    /// L1 batch QCs received from consensus
+    batch_certificates: ctx::channel::UnboundedSender<attester::BatchQC>,
+    /// Range of L2 blocks for which we have a QC persisted.
+    blocks_persisted: sync::watch::Receiver<storage::BlockStoreState>,
+    /// Range of L1 batches we have persisted.
+    batches_persisted: sync::watch::Receiver<BatchStoreState>,
+}
+
+struct PersistedBlockState(sync::watch::Sender<storage::BlockStoreState>);
+
+/// Background task of the `Store`.
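The `InsertCertificateError` introduced in `storage/mod.rs` above layers a cancellation variant over the DAL error via `#[from]`, so the retry loops in `StoreRunner::run` further down can tell "not ready yet", "real failure", and "shutting down" apart with a single match. A minimal self-contained sketch of the same thiserror pattern, with toy stand-ins for the inner types:

    use thiserror::Error;

    // Toy stand-ins for `ctx::Canceled` and `consensus_dal::InsertCertificateError`.
    #[derive(Debug, Error)]
    #[error("canceled")]
    struct Canceled;

    #[derive(Debug, Error)]
    enum DalError {
        #[error("payload not persisted yet")]
        MissingPayload,
        #[error("db failure")]
        Db,
    }

    #[derive(Debug, Error)]
    enum InsertCertificateError {
        #[error(transparent)]
        Canceled(#[from] Canceled),
        #[error(transparent)]
        Inner(#[from] DalError),
    }

    fn classify(err: InsertCertificateError) -> &'static str {
        match err {
            // Not ready yet: retry after a delay, as the runner loops below do.
            InsertCertificateError::Inner(DalError::MissingPayload) => "retry",
            // Real storage failure: bubble up.
            InsertCertificateError::Inner(_) => "fail",
            // Shutdown in progress: exit cleanly.
            InsertCertificateError::Canceled(_) => "stop",
        }
    }

    fn main() {
        assert_eq!(classify(DalError::MissingPayload.into()), "retry");
        assert_eq!(classify(Canceled.into()), "stop");
    }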
+pub struct StoreRunner { + pool: ConnectionPool, + blocks_persisted: PersistedBlockState, + batches_persisted: sync::watch::Sender, + block_certificates: ctx::channel::UnboundedReceiver, + batch_certificates: ctx::channel::UnboundedReceiver, +} + +impl Store { + pub(crate) async fn new( + ctx: &ctx::Ctx, + pool: ConnectionPool, + payload_queue: Option, + ) -> ctx::Result<(Store, StoreRunner)> { + let mut conn = pool.connection(ctx).await.wrap("connection()")?; + + // Initial state of persisted blocks + let blocks_persisted = conn + .block_certificates_range(ctx) + .await + .wrap("block_certificates_range()")?; + + // Initial state of persisted batches + let batches_persisted = conn.batches_range(ctx).await.wrap("batches_range()")?; + + drop(conn); + + let blocks_persisted = sync::watch::channel(blocks_persisted).0; + let batches_persisted = sync::watch::channel(batches_persisted).0; + let (block_certs_send, block_certs_recv) = ctx::channel::unbounded(); + let (batch_certs_send, batch_certs_recv) = ctx::channel::unbounded(); + + Ok(( + Store { + pool: pool.clone(), + block_certificates: block_certs_send, + batch_certificates: batch_certs_send, + block_payloads: Arc::new(sync::Mutex::new(payload_queue)), + blocks_persisted: blocks_persisted.subscribe(), + batches_persisted: batches_persisted.subscribe(), + }, + StoreRunner { + pool, + blocks_persisted: PersistedBlockState(blocks_persisted), + batches_persisted, + block_certificates: block_certs_recv, + batch_certificates: batch_certs_recv, + }, + )) + } + + /// Get a fresh connection from the pool. + async fn conn(&self, ctx: &ctx::Ctx) -> ctx::Result { + self.pool.connection(ctx).await.wrap("connection") + } +} + +impl PersistedBlockState { + /// Updates `persisted` to new. + /// Ends of the range can only be moved forward. + /// If `persisted.first` is moved forward, it means that blocks have been pruned. + /// If `persisted.last` is moved forward, it means that new blocks with certificates have been + /// persisted. + fn update(&self, new: storage::BlockStoreState) { + self.0.send_if_modified(|p| { + if &new == p { + return false; + } + p.first = p.first.max(new.first); + if p.next() < new.next() { + p.last = new.last; + } + true + }); + } + + /// Checks if the given certificate is exactly the next one that should + /// be persisted. + fn should_be_persisted(&self, cert: &validator::CommitQC) -> bool { + self.0.borrow().next() == cert.header().number + } + + /// Appends the `cert` to `persisted` range. + fn advance(&self, cert: validator::CommitQC) { + self.0.send_if_modified(|p| { + if p.next() != cert.header().number { + return false; + } + p.last = Some(cert); + true + }); + } +} + +impl StoreRunner { + pub async fn run(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { + let StoreRunner { + pool, + blocks_persisted, + batches_persisted, + mut block_certificates, + mut batch_certificates, + } = self; + + let res = scope::run!(ctx, |ctx, s| async { + s.spawn::<()>(async { + // Loop updating `blocks_persisted` whenever blocks get pruned. + const POLL_INTERVAL: time::Duration = time::Duration::seconds(1); + loop { + let range = pool + .connection(ctx) + .await? + .block_certificates_range(ctx) + .await + .wrap("block_certificates_range()")?; + blocks_persisted.update(range); + ctx.sleep(POLL_INTERVAL).await?; + } + }); + + // NOTE: Running this update loop will trigger the gossip of `SyncBatches` which is currently + // pointless as there is no proof and we have to ignore them. 
We can disable it, but bear in + // mind that any node which gossips the availability will cause pushes and pulls in the consensus. + s.spawn::<()>(async { + // Loop updating `batches_persisted` whenever a new L1 batch is available in the database. + // We have to do this because the L1 batch is produced as L2 blocks are executed, + // which can happen on a different machine or in a different process, so we can't rely on some + // DAL method updating this memory construct. However I'm not sure that `BatchStoreState` + // really has to contain the full blown last batch, or whether it could have for example + // just the number of it. We can't just use the `attester::BatchQC`, which would make it + // analogous to the `BlockStoreState`, because the `SyncBatch` mechanism is for catching + // up with L1 batches from peers _without_ the QC, based on L1 inclusion proofs instead. + // Nevertheless since the `SyncBatch` contains all transactions for all L2 blocks, + // we can try to make it less frequent by querying just the last batch number first. + const POLL_INTERVAL: time::Duration = time::Duration::seconds(1); + let mut next_batch_number = { batches_persisted.borrow().next() }; + loop { + let mut conn = pool.connection(ctx).await?; + if let Some(last_batch_number) = conn + .get_last_batch_number(ctx) + .await + .wrap("last_batch_number()")? + { + if last_batch_number >= next_batch_number { + let range = conn.batches_range(ctx).await.wrap("batches_range()")?; + next_batch_number = last_batch_number.next(); + batches_persisted.send_replace(range); + } + } + ctx.sleep(POLL_INTERVAL).await?; + } + }); + + s.spawn::<()>(async { + // Loop inserting batch certificates into storage + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); + loop { + let cert = batch_certificates.recv(ctx).await?; + + loop { + use consensus_dal::InsertCertificateError as E; + // Try to insert the cert. + let res = pool + .connection(ctx) + .await? + .insert_batch_certificate(ctx, &cert) + .await; + + match res { + Ok(()) => { + break; + } + Err(InsertCertificateError::Inner(E::MissingPayload)) => { + // The L1 batch isn't available yet. + // We can wait until it's produced/received, or we could modify gossip + // so that we don't even accept votes until we have the corresponding batch. + ctx.sleep(POLL_INTERVAL).await?; + } + Err(InsertCertificateError::Inner(err)) => { + return Err(ctx::Error::Internal(anyhow::Error::from(err))) + } + Err(InsertCertificateError::Canceled(err)) => { + return Err(ctx::Error::Canceled(err)) + } + } + } + } + }); + + // Loop inserting block certs to storage. + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); + loop { + let cert = block_certificates.recv(ctx).await?; + // Wait for the block to be persisted, so that we can attach a cert to it. + // We may exit this loop without persisting the certificate in case the + // corresponding block has been pruned in the meantime. + while blocks_persisted.should_be_persisted(&cert) { + use consensus_dal::InsertCertificateError as E; + // Try to insert the cert. + let res = pool + .connection(ctx) + .await? + .insert_block_certificate(ctx, &cert) + .await; + match res { + Ok(()) => { + // Insertion succeeded: update persisted state + // and wait for the next cert. + blocks_persisted.advance(cert); + break; + } + Err(InsertCertificateError::Inner(E::MissingPayload)) => { + // the payload is not in storage, it's either not yet persisted + // or already pruned. We will retry after a delay. 
+ ctx.sleep(POLL_INTERVAL).await?; + } + Err(InsertCertificateError::Canceled(err)) => { + return Err(ctx::Error::Canceled(err)) + } + Err(InsertCertificateError::Inner(err)) => { + return Err(ctx::Error::Internal(anyhow::Error::from(err))) + } + } + } + } + }) + .await; + + match res { + Err(ctx::Error::Canceled(_)) | Ok(()) => Ok(()), + Err(ctx::Error::Internal(err)) => Err(err), + } + } +} + +#[async_trait::async_trait] +impl storage::PersistentBlockStore for Store { + async fn genesis(&self, ctx: &ctx::Ctx) -> ctx::Result { + Ok(self + .conn(ctx) + .await? + .genesis(ctx) + .await? + .context("not found")?) + } + + fn persisted(&self) -> sync::watch::Receiver { + self.blocks_persisted.clone() + } + + async fn block( + &self, + ctx: &ctx::Ctx, + number: validator::BlockNumber, + ) -> ctx::Result { + Ok(self + .conn(ctx) + .await? + .block(ctx, number) + .await? + .context("not found")?) + } + + /// If actions queue is set (and the block has not been stored yet), + /// the block will be translated into a sequence of actions. + /// The received actions should be fed + /// to `ExternalIO`, so that `StateKeeper` will store the corresponding L2 block in the db. + /// + /// `store_next_block()` call will wait synchronously for the L2 block. + /// Once the L2 block is observed in storage, `store_next_block()` will store a cert for this + /// L2 block. + async fn queue_next_block( + &self, + ctx: &ctx::Ctx, + block: validator::FinalBlock, + ) -> ctx::Result<()> { + let mut payloads = sync::lock(ctx, &self.block_payloads).await?.into_async(); + if let Some(payloads) = &mut *payloads { + payloads + .send(to_fetched_block(block.number(), &block.payload).context("to_fetched_block")?) + .await + .context("payload_queue.send()")?; + } + self.block_certificates.send(block.justification); + Ok(()) + } +} + +#[async_trait::async_trait] +impl storage::ReplicaStore for Store { + async fn state(&self, ctx: &ctx::Ctx) -> ctx::Result { + self.conn(ctx) + .await? + .replica_state(ctx) + .await + .wrap("replica_state()") + } + + async fn set_state(&self, ctx: &ctx::Ctx, state: &storage::ReplicaState) -> ctx::Result<()> { + self.conn(ctx) + .await? + .set_replica_state(ctx, state) + .await + .wrap("set_replica_state()") + } +} + +#[async_trait::async_trait] +impl PayloadManager for Store { + /// Currently (for the main node) proposing is implemented as just converting an L2 block from db (without a cert) into a payload. + async fn propose( + &self, + ctx: &ctx::Ctx, + block_number: validator::BlockNumber, + ) -> ctx::Result { + const LARGE_PAYLOAD_SIZE: usize = 1 << 20; + let payload = self + .pool + .wait_for_payload(ctx, block_number) + .await + .wrap("wait_for_payload")?; + let encoded_payload = payload.encode(); + if encoded_payload.0.len() > LARGE_PAYLOAD_SIZE { + tracing::warn!( + "large payload ({}B) with {} transactions", + encoded_payload.0.len(), + payload.transactions.len() + ); + } + Ok(encoded_payload) + } + + /// Verify that `payload` is a correct proposal for the block `block_number`. + /// * for the main node it checks whether the same block is already present in storage. + /// * for the EN validator + /// * if the block with this number was already applied, it checks that it was the + /// same block. It should always be true, because main node is the only proposer and + /// to propose a different block a hard fork is needed. + /// * otherwise, EN attempts to apply the received block. If the block was incorrect + /// the statekeeper is expected to crash the whole EN. Otherwise OK is returned. 
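The two certificate loops that close out `StoreRunner::run` above share one shape: attempt the insert, treat a missing dependency (the payload or L1 batch is not in the DB yet) as "too early" rather than a failure, and poll again. Stripped to its skeleton, with plain tokio sleeps standing in for `ctx.sleep` and all names illustrative:

    use std::time::Duration;

    enum InsertOutcome {
        Done,
        NotYet, // dependency (payload / L1 batch) not persisted yet
    }

    // Stand-in for the fallible insert; the real code calls the DAL here.
    async fn try_insert(attempt: &mut u32) -> anyhow::Result<InsertOutcome> {
        *attempt += 1;
        Ok(if *attempt < 3 { InsertOutcome::NotYet } else { InsertOutcome::Done })
    }

    async fn insert_with_retries() -> anyhow::Result<()> {
        const POLL_INTERVAL: Duration = Duration::from_millis(50);
        let mut attempt = 0;
        loop {
            match try_insert(&mut attempt).await? {
                InsertOutcome::Done => return Ok(()),
                // Not an error, just too early: wait for the state keeper to catch up.
                InsertOutcome::NotYet => tokio::time::sleep(POLL_INTERVAL).await,
            }
        }
    }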
+ async fn verify( + &self, + ctx: &ctx::Ctx, + block_number: validator::BlockNumber, + payload: &validator::Payload, + ) -> ctx::Result<()> { + let mut payloads = sync::lock(ctx, &self.block_payloads).await?.into_async(); + if let Some(payloads) = &mut *payloads { + let block = to_fetched_block(block_number, payload).context("to_fetched_block")?; + let n = block.number; + payloads.send(block).await.context("payload_queue.send()")?; + // Wait for the block to be processed, without waiting for it to be stored. + // TODO(BFT-459): this is not ideal, because we don't check here whether the + // processed block is the same as `payload`. It will work correctly + // with the current implementation of EN, but we should make it more + // precise when block reverting support is implemented. + ctx.wait(payloads.sync_state.wait_for_local_block(n)) + .await?; + } else { + let want = self.pool.wait_for_payload(ctx, block_number).await?; + let got = Payload::decode(payload).context("Payload::decode(got)")?; + if got != want { + return Err( + anyhow::format_err!("unexpected payload: got {got:?} want {want:?}").into(), + ); + } + } + Ok(()) + } +} + +#[async_trait::async_trait] +impl storage::PersistentBatchStore for Store { + /// Range of batches persisted in storage. + fn persisted(&self) -> sync::watch::Receiver { + self.batches_persisted.clone() + } + + /// Get the earliest L1 batch number which has to be signed by attesters. + async fn earliest_batch_number_to_sign( + &self, + ctx: &ctx::Ctx, + ) -> ctx::Result> { + // This is the rough roadmap of how this logic will evolve: + // 1. Make best effort at gossiping and collecting votes; the `BatchVotes` in consensus only considers the last vote per attesters. + // Still, we can re-sign more than the last batch, anticipating step 2. + // 2. Ask the Main Node what is the earliest batch number that it still expects votes for (ie. what is the last submission + 1). + // 3. Change `BatchVotes` to handle multiple pending batch numbers, anticipating that batch intervals might decrease dramatically. + // 4. Once QC is required to submit to L1, Look at L1 to figure out what is the last submission, and sign after that. + + // Originally this method returned all unsigned batch numbers by doing a DAL query, but we decided it should be okay and cheap + // to resend signatures for already signed batches, and we don't have to worry about skipping them. Because of that, we also + // didn't think it makes sense to query the database for the earliest unsigned batch *after* the submission, because we might + // as well just re-sign everything. Until we have a way to argue about the "last submission" we just re-sign the last 10 to + // try to produce as many QCs as the voting register allows, within reason. + + // The latest decision is not to store batches with gaps between in the database *of the main node*. + // Once we have an API to serve to external nodes the earliest number the main node wants them to sign, + // we can get rid of this method: on the main node we can sign from what `last_batch_qc` returns, and + // while external nodes we can go from whatever the API returned. + + const NUM_BATCHES_TO_SIGN: u64 = 10; + + let Some(last_batch_number) = self + .conn(ctx) + .await? + .get_last_batch_number(ctx) + .await + .wrap("get_last_batch_number")? + else { + return Ok(None); + }; + + Ok(Some(attester::BatchNumber( + last_batch_number.0.saturating_sub(NUM_BATCHES_TO_SIGN), + ))) + } + + /// Get the L1 batch QC from storage with the highest number. 
+    ///
+    /// This might have gaps before it. Until there is a way to catch up with missing
+    /// certificates by fetching from the main node, returning the last inserted one
+    /// is the best we can do.
+    async fn last_batch_qc(&self, ctx: &ctx::Ctx) -> ctx::Result<Option<attester::BatchQC>> {
+        let Some(number) = self
+            .conn(ctx)
+            .await?
+            .get_last_batch_certificate_number(ctx)
+            .await
+            .wrap("get_last_batch_certificate_number")?
+        else {
+            return Ok(None);
+        };
+
+        self.get_batch_qc(ctx, number).await
+    }
+
+    /// Returns the batch with the given number.
+    async fn get_batch(
+        &self,
+        ctx: &ctx::Ctx,
+        number: attester::BatchNumber,
+    ) -> ctx::Result<Option<attester::SyncBatch>> {
+        self.conn(ctx)
+            .await?
+            .get_batch(ctx, number)
+            .await
+            .wrap("get_batch")
+    }
+
+    /// Returns the [attester::Batch] with the given number, which is the `message` that
+    /// appears in [attester::BatchQC], and represents the content that needs to be signed
+    /// by the attesters.
+    async fn get_batch_to_sign(
+        &self,
+        ctx: &ctx::Ctx,
+        number: attester::BatchNumber,
+    ) -> ctx::Result<Option<attester::Batch>> {
+        let Some(batch) = self
+            .conn(ctx)
+            .await?
+            .batch(
+                ctx,
+                L1BatchNumber(u32::try_from(number.0).context("number")?),
+            )
+            .await
+            .wrap("batch")?
+        else {
+            return Ok(None);
+        };
+
+        let info = StoredBatchInfo::from(&batch);
+        let hash = Keccak256::from_bytes(info.hash().0);
+
+        Ok(Some(attester::Batch {
+            number,
+            hash: attester::BatchHash(hash),
+        }))
+    }
+
+    /// Returns the QC of the batch with the given number.
+    async fn get_batch_qc(
+        &self,
+        ctx: &ctx::Ctx,
+        number: attester::BatchNumber,
+    ) -> ctx::Result<Option<attester::BatchQC>> {
+        self.conn(ctx)
+            .await?
+            .batch_certificate(ctx, number)
+            .await
+            .wrap("batch_certificate")
+    }
+
+    /// Store the given QC in the storage.
+    ///
+    /// Storing a QC is allowed even if it creates a gap in the L1 batch history.
+    /// If we need the last batch QC that still needs to be signed, then the queries need to look for gaps.
+    async fn store_qc(&self, _ctx: &ctx::Ctx, qc: attester::BatchQC) -> ctx::Result<()> {
+        // Storing asynchronously because we might get the QC before the L1 batch itself.
+        self.batch_certificates.send(qc);
+        Ok(())
+    }
+
+    /// Queue the batch to be persisted in storage.
+    ///
+    /// The caller [BatchStore] ensures that this is only called when the batch is the next expected one.
+    async fn queue_next_batch(
+        &self,
+        _ctx: &ctx::Ctx,
+        _batch: attester::SyncBatch,
+    ) -> ctx::Result<()> {
+        // Currently the gossiping of `SyncBatch` and the `BatchStoreState` is unconditionally started by `Network::run_stream` in consensus,
+        // and as long as any node reports new batches available by updating `PersistentBatchStore::persisted` here, the other nodes
+        // will start pulling the corresponding batches, which will end up being passed to this method.
+        // If we return an error here or panic, it will stop the whole consensus task tree due to the way scopes work, so instead we just return immediately.
+        // In the future we have to validate the proof against the L1 state root hash, which IIUC we can't do just yet.
+ + // Err(anyhow::format_err!("unimplemented: queue_next_batch should not be called until we have the stateless L1 batch story completed.").into()) + + Ok(()) + } +} diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index f5f30021b7c4..c73d20982c16 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -4,6 +4,7 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; use zksync_consensus_roles::validator; use zksync_contracts::BaseSystemContracts; +use zksync_dal::CoreDal as _; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{recover, snapshot, Snapshot}; use zksync_types::{ @@ -34,10 +35,10 @@ impl ConnectionPool { ) -> ConnectionPool { match from_snapshot { true => { - ConnectionPool::from_snapshot(Snapshot::make( + ConnectionPool::from_snapshot(Snapshot::new( L1BatchNumber(23), L2BlockNumber(87), - &[], + vec![], mock_genesis_params(protocol_version), )) .await @@ -47,7 +48,7 @@ impl ConnectionPool { } /// Waits for the `number` L2 block to have a certificate. - pub async fn wait_for_certificate( + pub async fn wait_for_block_certificate( &self, ctx: &ctx::Ctx, number: validator::BlockNumber, @@ -57,9 +58,9 @@ impl ConnectionPool { .connection(ctx) .await .wrap("connection()")? - .certificate(ctx, number) + .block_certificate(ctx, number) .await - .wrap("certificate()")? + .wrap("block_certificate()")? .is_none() { ctx.sleep(POLL_INTERVAL).await?; @@ -118,26 +119,20 @@ impl ConnectionPool { } /// Waits for `want_last` block to have certificate then fetches all L2 blocks with certificates. - pub async fn wait_for_certificates( + pub async fn wait_for_block_certificates( &self, ctx: &ctx::Ctx, want_last: validator::BlockNumber, ) -> ctx::Result> { - self.wait_for_certificate(ctx, want_last).await?; + self.wait_for_block_certificate(ctx, want_last).await?; let mut conn = self.connection(ctx).await.wrap("connection()")?; - let last_cert = conn - .last_certificate(ctx) + let range = conn + .block_certificates_range(ctx) .await - .wrap("last_certificate()")? - .unwrap(); - let first_cert = conn - .first_certificate(ctx) - .await - .wrap("first_certificate()")? - .unwrap(); - assert_eq!(want_last, last_cert.header().number); + .wrap("certificates_range()")?; + assert_eq!(want_last.next(), range.next()); let mut blocks: Vec = vec![]; - for i in first_cert.header().number.0..=last_cert.header().number.0 { + for i in range.first.0..range.next().0 { let i = validator::BlockNumber(i); let block = conn.block(ctx, i).await.context("block()")?.unwrap(); blocks.push(block); @@ -146,12 +141,12 @@ impl ConnectionPool { } /// Same as `wait_for_certificates`, but additionally verifies all the blocks against genesis. 
- pub async fn wait_for_certificates_and_verify( + pub async fn wait_for_block_certificates_and_verify( &self, ctx: &ctx::Ctx, want_last: validator::BlockNumber, ) -> ctx::Result> { - let blocks = self.wait_for_certificates(ctx, want_last).await?; + let blocks = self.wait_for_block_certificates(ctx, want_last).await?; let genesis = self .connection(ctx) .await @@ -165,4 +160,32 @@ impl ConnectionPool { } Ok(blocks) } + + pub async fn prune_batches( + &self, + ctx: &ctx::Ctx, + last_batch: L1BatchNumber, + ) -> ctx::Result<()> { + let mut conn = self.connection(ctx).await.context("connection()")?; + let (_, last_block) = ctx + .wait( + conn.0 + .blocks_dal() + .get_l2_block_range_of_l1_batch(last_batch), + ) + .await? + .context("get_l2_block_range_of_l1_batch()")? + .context("batch not found")?; + conn.0 + .pruning_dal() + .soft_prune_batches_range(last_batch, last_block) + .await + .context("soft_prune_batches_range()")?; + conn.0 + .pruning_dal() + .hard_prune_batches_range(last_batch, last_block) + .await + .context("hard_prune_batches_range()")?; + Ok(()) + } } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index ce16efed2225..a2009d14dece 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -7,7 +7,7 @@ use zksync_concurrency::{ctx, error::Wrap as _, scope, sync, time}; use zksync_config::{ configs, configs::{ - chain::OperationsManagerConfig, + chain::{OperationsManagerConfig, StateKeeperConfig}, consensus as config, database::{MerkleTreeConfig, MerkleTreeMode}, }, @@ -49,7 +49,8 @@ use zksync_web3_decl::client::{Client, DynClient, L2}; use crate::{ batch::{L1BatchCommit, L1BatchWithWitness, LastBlockCommit}, - en, ConnectionPool, + en, + storage::ConnectionPool, }; /// Fake StateKeeper for tests. @@ -78,6 +79,7 @@ pub(super) fn config(cfg: &network::Config) -> (config::ConsensusConfig, config: server_addr: *cfg.server_addr, public_addr: config::Host(cfg.public_addr.0.clone()), max_payload_size: usize::MAX, + max_batch_size: usize::MAX, gossip_dynamic_inbound_limit: cfg.gossip.dynamic_inbound_limit, gossip_static_inbound: cfg .gossip @@ -98,8 +100,15 @@ pub(super) fn config(cfg: &network::Config) -> (config::ConsensusConfig, config: key: config::ValidatorPublicKey(key.public().encode()), weight: 1, }], + // We only have access to the main node attester key in the `cfg`, which is fine + // for validators because at the moment there is only one leader. It doesn't + // allow us to form a full attester committee. However in the current tests + // the `new_configs` used to produce the array of `network::Config` doesn't + // assign an attester key, so it doesn't matter. 
+ attesters: Vec::new(), leader: config::ValidatorPublicKey(key.public().encode()), }), + rpc: None, }, config::ConsensusSecrets { node_key: Some(config::NodeSecretKey(cfg.gossip.key.encode().into())), @@ -107,6 +116,10 @@ pub(super) fn config(cfg: &network::Config) -> (config::ConsensusConfig, config: .validator_key .as_ref() .map(|k| config::ValidatorSecretKey(k.encode().into())), + attester_key: cfg + .attester_key + .as_ref() + .map(|k| config::AttesterSecretKey(k.encode().into())), }, ) } @@ -166,8 +179,15 @@ impl StateKeeper { let operation_manager_config = OperationsManagerConfig { delay_interval: 100, //`100ms` }; - let config = - MetadataCalculatorConfig::for_main_node(&merkle_tree_config, &operation_manager_config); + let state_keeper_config = StateKeeperConfig { + protective_reads_persistence_enabled: true, + ..Default::default() + }; + let config = MetadataCalculatorConfig::for_main_node( + &merkle_tree_config, + &operation_manager_config, + &state_keeper_config, + ); let metadata_calculator = MetadataCalculator::new(config, None, pool.0.clone()) .await .context("MetadataCalculator::new()")?; @@ -251,7 +271,7 @@ impl StateKeeper { actions.push(FetchedTransaction::new(tx).into()); } actions.push(SyncAction::SealL2Block); - self.actions_sender.push_actions(actions).await; + self.actions_sender.push_actions(actions).await.unwrap(); } /// Pushes `SealBatch` command to the `StateKeeper`. @@ -259,7 +279,7 @@ impl StateKeeper { // Each batch ends with an empty block (aka fictive block). let mut actions = vec![self.open_block()]; actions.push(SyncAction::SealBatch); - self.actions_sender.push_actions(actions).await; + self.actions_sender.push_actions(actions).await.unwrap(); self.batch_sealed = true; } @@ -474,8 +494,7 @@ impl StateKeeperRunner { self.actions_queue, Box::::default(), L2ChainId::default(), - ) - .await?; + )?; s.spawn_bg(async { Ok(l2_block_sealer @@ -587,8 +606,7 @@ impl StateKeeperRunner { self.actions_queue, Box::::default(), L2ChainId::default(), - ) - .await?; + )?; s.spawn_bg(async { Ok(l2_block_sealer .run() diff --git a/core/node/consensus/src/tests.rs b/core/node/consensus/src/tests.rs index b16c66e478bb..7d269376b65c 100644 --- a/core/node/consensus/src/tests.rs +++ b/core/node/consensus/src/tests.rs @@ -1,8 +1,7 @@ -#![allow(unused)] use anyhow::Context as _; use test_casing::{test_casing, Product}; use tracing::Instrument as _; -use zksync_concurrency::{ctx, scope}; +use zksync_concurrency::{ctx, error::Wrap, scope}; use zksync_config::configs::consensus::{ValidatorPublicKey, WeightedValidator}; use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_network::testonly::{new_configs, new_fullnode}; @@ -10,17 +9,20 @@ use zksync_consensus_roles::{ validator, validator::testonly::{Setup, SetupSpec}, }; -use zksync_dal::CoreDal; -use zksync_node_test_utils::Snapshot; -use zksync_types::{L1BatchNumber, L2BlockNumber, ProtocolVersionId}; +use zksync_consensus_storage::BlockStore; +use zksync_types::{L1BatchNumber, ProtocolVersionId}; -use super::*; +use crate::{ + mn::run_main_node, + storage::{ConnectionPool, Store}, + testonly, +}; const VERSIONS: [ProtocolVersionId; 2] = [ProtocolVersionId::latest(), ProtocolVersionId::next()]; const FROM_SNAPSHOT: [bool; 2] = [true, false]; #[test_casing(2, VERSIONS)] -#[tokio::test(flavor = "multi_thread")] +#[tokio::test] async fn test_validator_block_store(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); @@ -71,7 +73,7 @@ async fn 
test_validator_block_store(version: ProtocolVersionId) { .await .unwrap(); let got = pool - .wait_for_certificates(ctx, block.number()) + .wait_for_block_certificates(ctx, block.number()) .await .unwrap(); assert_eq!(want[..=i], got); @@ -82,11 +84,81 @@ async fn test_validator_block_store(version: ProtocolVersionId) { } } +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[tokio::test] +async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let pool = ConnectionPool::test(from_snapshot, version).await; + + // Fill storage with unsigned L2 blocks and L1 batches in a way that the + // last L1 batch is guaranteed to have some L2 blocks executed in it. + scope::run!(ctx, |ctx, s| async { + // Start state keeper. + let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + + for _ in 0..3 { + for _ in 0..2 { + sk.push_random_block(rng).await; + } + sk.seal_batch().await; + } + sk.push_random_block(rng).await; + + pool.wait_for_payload(ctx, sk.last_block()).await?; + + Ok(()) + }) + .await + .unwrap(); + + // Now we can try to retrieve the batch. + scope::run!(ctx, |ctx, _s| async { + let mut conn = pool.connection(ctx).await?; + let batches = conn.batches_range(ctx).await?; + let last = batches.last.expect("last is set"); + let (min, max) = conn + .get_l2_block_range_of_l1_batch(ctx, last) + .await? + .unwrap(); + + let last_batch = conn + .get_batch(ctx, last) + .await? + .expect("last batch can be retrieved"); + + assert_eq!( + last_batch.payloads.len(), + (max.0 - min.0) as usize, + "all block payloads present" + ); + + let first_payload = last_batch + .payloads + .first() + .expect("last batch has payloads"); + + let want_payload = conn.payload(ctx, min).await?.expect("payload is in the DB"); + let want_payload = want_payload.encode(); + + assert_eq!( + first_payload, &want_payload, + "first payload is the right number" + ); + + anyhow::Ok(()) + }) + .await + .unwrap(); +} + // In the current implementation, consensus certificates are created asynchronously // for the L2 blocks constructed by the StateKeeper. This means that consensus actor // is effectively just back filling the consensus certificates for the L2 blocks in storage. 
#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test(flavor = "multi_thread")] +#[tokio::test] async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); @@ -119,24 +191,24 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Generate couple more blocks and wait for consensus to catch up."); sk.push_random_blocks(rng, 3).await; pool - .wait_for_certificate(ctx, sk.last_block()) + .wait_for_block_certificate(ctx, sk.last_block()) .await - .context("wait_for_certificate(<2nd phase>)")?; + .context("wait_for_block_certificate(<2nd phase>)")?; tracing::info!("Synchronously produce blocks one by one, and wait for consensus."); for _ in 0..2 { sk.push_random_blocks(rng, 1).await; pool - .wait_for_certificate(ctx, sk.last_block()) + .wait_for_block_certificate(ctx, sk.last_block()) .await - .context("wait_for_certificate(<3rd phase>)")?; + .context("wait_for_block_certificate(<3rd phase>)")?; } tracing::info!("Verify all certificates"); pool - .wait_for_certificates_and_verify(ctx, sk.last_block()) + .wait_for_block_certificates_and_verify(ctx, sk.last_block()) .await - .context("wait_for_certificates_and_verify()")?; + .context("wait_for_block_certificates_and_verify()")?; Ok(()) }) .await @@ -150,7 +222,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { // Test running a validator node and 2 full nodes recovered from different snapshots. #[test_casing(2, VERSIONS)] -#[tokio::test(flavor = "multi_thread")] +#[tokio::test] async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); @@ -171,7 +243,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { validator.push_random_blocks(rng, 5).await; validator.seal_batch().await; validator_pool - .wait_for_certificate(ctx, validator.last_block()) + .wait_for_block_certificate(ctx, validator.last_block()) .await?; tracing::info!("take snapshot and start a node from it"); @@ -189,7 +261,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { validator.push_random_blocks(rng, 5).await; validator.seal_batch().await; node_pool - .wait_for_certificate(ctx, validator.last_block()) + .wait_for_block_certificate(ctx, validator.last_block()) .await?; tracing::info!("take another snapshot and start a node from it"); @@ -206,15 +278,15 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { tracing::info!("produce more blocks and compare storages"); validator.push_random_blocks(rng, 5).await; let want = validator_pool - .wait_for_certificates_and_verify(ctx, validator.last_block()) + .wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?; // node stores should be suffixes for validator store. for got in [ node_pool - .wait_for_certificates_and_verify(ctx, validator.last_block()) + .wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?, node_pool2 - .wait_for_certificates_and_verify(ctx, validator.last_block()) + .wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?, ] { assert_eq!(want[want.len() - got.len()..], got[..]); @@ -229,7 +301,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { // Validator is producing signed blocks and fetchers are expected to fetch // them directly or indirectly. 
#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test(flavor = "multi_thread")] +#[tokio::test] async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { const NODES: usize = 2; @@ -296,12 +368,12 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { validator.push_random_blocks(rng, 5).await; let want_last = validator.last_block(); let want = validator_pool - .wait_for_certificates_and_verify(ctx, want_last) + .wait_for_block_certificates_and_verify(ctx, want_last) .await?; for pool in &node_pools { assert_eq!( want, - pool.wait_for_certificates_and_verify(ctx, want_last) + pool.wait_for_block_certificates_and_verify(ctx, want_last) .await? ); } @@ -313,7 +385,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { // Test running external node (non-leader) validators. #[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test(flavor = "multi_thread")] +#[tokio::test] async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { const NODES: usize = 3; @@ -351,7 +423,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Run main node with all nodes being validators."); let (mut cfg, secrets) = testonly::config(&cfgs[0]); cfg.genesis_spec.as_mut().unwrap().validators = setup - .keys + .validator_keys .iter() .map(|k| WeightedValidator { key: ValidatorPublicKey(k.public().encode()), @@ -382,12 +454,12 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { main_node.push_random_blocks(rng, 5).await; let want_last = main_node.last_block(); let want = main_node_pool - .wait_for_certificates_and_verify(ctx, want_last) + .wait_for_block_certificates_and_verify(ctx, want_last) .await?; for pool in &ext_node_pools { assert_eq!( want, - pool.wait_for_certificates_and_verify(ctx, want_last) + pool.wait_for_block_certificates_and_verify(ctx, want_last) .await? ); } @@ -399,7 +471,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { // Test fetcher back filling missing certs. 
#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test(flavor = "multi_thread")] +#[tokio::test] async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); @@ -429,7 +501,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV s.spawn_bg(node.run_consensus(ctx, client.clone(), &node_cfg)); validator.push_random_blocks(rng, 3).await; node_pool - .wait_for_certificate(ctx, validator.last_block()) + .wait_for_block_certificate(ctx, validator.last_block()) .await?; Ok(()) }) @@ -457,10 +529,10 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV s.spawn_bg(node.run_consensus(ctx, client.clone(), &node_cfg)); validator.push_random_blocks(rng, 3).await; let want = validator_pool - .wait_for_certificates_and_verify(ctx, validator.last_block()) + .wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?; let got = node_pool - .wait_for_certificates_and_verify(ctx, validator.last_block()) + .wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?; assert_eq!(want, got); Ok(()) @@ -473,6 +545,91 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV .unwrap(); } +#[test_casing(2, VERSIONS)] +#[tokio::test] +async fn test_with_pruning(version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 1); + let validator_cfg = new_configs(rng, &setup, 0)[0].clone(); + let node_cfg = new_fullnode(rng, &validator_cfg); + + scope::run!(ctx, |ctx, s| async { + let validator_pool = ConnectionPool::test(false, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(async { + runner + .run(ctx) + .instrument(tracing::info_span!("validator")) + .await + .context("validator") + }); + tracing::info!("Run validator."); + let (cfg, secrets) = testonly::config(&validator_cfg); + s.spawn_bg({ + let validator_pool = validator_pool.clone(); + async { + run_main_node(ctx, cfg, secrets, validator_pool) + .await + .context("run_main_node()") + } + }); + // TODO: ensure at least L1 batch in `testonly::StateKeeper::new()` to make it fool proof. + validator.seal_batch().await; + + tracing::info!("Run node."); + let node_pool = ConnectionPool::test(false, version).await; + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(async { + runner + .run(ctx) + .instrument(tracing::info_span!("node")) + .await + .context("node") + }); + let conn = validator.connect(ctx).await?; + s.spawn_bg(async { + node.run_consensus(ctx, conn, &node_cfg) + .await + .context("run_consensus()") + }); + + tracing::info!("Sync some blocks"); + validator.push_random_blocks(rng, 5).await; + validator.seal_batch().await; + let to_prune = validator.last_sealed_batch(); + tracing::info!( + "to_prune = batch {}; block {}", + to_prune, + validator.last_block() + ); + tracing::info!( + "Seal another batch to make sure that there is at least 1 sealed batch after pruning." 
+ ); + validator.push_random_blocks(rng, 5).await; + validator.seal_batch().await; + validator_pool + .wait_for_batch(ctx, validator.last_sealed_batch()) + .await?; + + tracing::info!("Prune some blocks and sync more"); + validator_pool + .prune_batches(ctx, to_prune) + .await + .context("prune_batches")?; + validator.push_random_blocks(rng, 5).await; + node_pool + .wait_for_block_certificates(ctx, validator.last_block()) + .await + .context("wait_for_block_certificates()")?; + Ok(()) + }) + .await + .unwrap(); +} + #[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionId) { diff --git a/core/node/consistency_checker/Cargo.toml b/core/node/consistency_checker/Cargo.toml index 41fe90fabe27..769690b493a4 100644 --- a/core/node/consistency_checker/Cargo.toml +++ b/core/node/consistency_checker/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_consistency_checker" -version = "0.1.0" +description = "Consistency checker for ZKsync network" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index e4634c86e403..ba8085333a4c 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -262,6 +262,7 @@ pub fn detect_da( /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata const PUBDATA_SOURCE_CALLDATA: u8 = 0; const PUBDATA_SOURCE_BLOBS: u8 = 1; + const PUBDATA_SOURCE_CUSTOM: u8 = 2; fn parse_error(message: impl Into>) -> ethabi::Error { ethabi::Error::Other(message.into()) @@ -292,6 +293,7 @@ pub fn detect_da( match last_reference_token.first() { Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataDA::Calldata), Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataDA::Blobs), + Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM => Ok(PubdataDA::Custom), Some(&byte) => Err(parse_error(format!( "unexpected first byte of the last reference token; expected one of [{PUBDATA_SOURCE_CALLDATA}, {PUBDATA_SOURCE_BLOBS}], \ got {byte}" diff --git a/core/node/consistency_checker/src/tests/mod.rs b/core/node/consistency_checker/src/tests/mod.rs index 853090b1907d..13c1caec381a 100644 --- a/core/node/consistency_checker/src/tests/mod.rs +++ b/core/node/consistency_checker/src/tests/mod.rs @@ -382,6 +382,7 @@ fn l1_batch_commit_log(l1_batch: &L1BatchWithMetadata) -> Log { transaction_log_index: None, log_type: Some("mined".into()), removed: None, + block_timestamp: None, } } diff --git a/core/node/contract_verification_server/Cargo.toml b/core/node/contract_verification_server/Cargo.toml index ee38d30906fb..eeb2c7828467 100644 --- a/core/node/contract_verification_server/Cargo.toml +++ b/core/node/contract_verification_server/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_contract_verification_server" -version = "0.1.0" +description = "ZKsync contract verification server" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -18,7 +19,7 @@ vise.workspace = true anyhow.workspace = true axum.workspace = true tokio = { workspace = true, features = ["time"] } -tower-http.workspace = true +tower-http = { workspace = true, features = ["cors"] } tracing.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/core/node/contract_verification_server/src/lib.rs b/core/node/contract_verification_server/src/lib.rs index 
83a53cfc98f3..eea45f8564bf 100644
--- a/core/node/contract_verification_server/src/lib.rs
+++ b/core/node/contract_verification_server/src/lib.rs
@@ -18,8 +18,10 @@ pub async fn start_server(
     let bind_address = config.bind_addr();
     let api = RestApi::new(master_connection_pool, replica_connection_pool).into_router();
 
-    axum::Server::bind(&bind_address)
-        .serve(api.into_make_service())
+    let listener = tokio::net::TcpListener::bind(bind_address)
+        .await
+        .context("Cannot bind to the specified address")?;
+    axum::serve(listener, api)
         .with_graceful_shutdown(async move {
             if stop_receiver.changed().await.is_err() {
                 tracing::warn!("Stop signal sender for contract verification server was dropped without sending a signal");
diff --git a/core/node/da_dispatcher/Cargo.toml b/core/node/da_dispatcher/Cargo.toml
new file mode 100644
index 000000000000..8a10d6813a5a
--- /dev/null
+++ b/core/node/da_dispatcher/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "zksync_da_dispatcher"
+description = "ZKsync data availability dispatcher"
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+license.workspace = true
+keywords.workspace = true
+categories.workspace = true
+
+
+[dependencies]
+vise.workspace = true
+zksync_dal.workspace = true
+zksync_utils.workspace = true
+zksync_config.workspace = true
+zksync_types.workspace = true
+zksync_da_client.workspace = true
+
+tokio = { workspace = true, features = ["time"] }
+anyhow.workspace = true
+tracing.workspace = true
+chrono.workspace = true
+rand.workspace = true
+futures.workspace = true
diff --git a/core/node/da_dispatcher/README.md b/core/node/da_dispatcher/README.md
new file mode 100644
index 000000000000..a7ea6351a5ed
--- /dev/null
+++ b/core/node/da_dispatcher/README.md
@@ -0,0 +1,18 @@
+# DA dispatcher
+
+This crate contains an implementation of the DataAvailability dispatcher component, which sends blobs of data to the
+corresponding DA layer.
+
+## Overview
+
+The implementation of the DA clients is abstracted away from the dispatcher. The dispatcher is responsible for storing
+the DA blob info in the Postgres database and using it to fetch the inclusion proofs for the blobs. The retry logic is
+also part of the DA dispatcher.
+
+This component assumes that batches are sent to L1 sequentially and that there is no need to fetch the
+inclusion data for their DA in parallel. The same applies to dispatching DA blobs: there is no need to do that in
+parallel unless we are facing performance issues when the sequencer is trying to catch up after an outage.
+
+This is a singleton component: only one instance of the DA dispatcher should be running at a time. If multiple
+instances are started, they will dispatch the same pubdata blobs to the DA layer. This will not cause any
+critical issues, but it is wasteful.
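The retry behavior mentioned in the README above is implemented by the `retry` helper that appears in `da_dispatcher.rs` below. As a rough standalone sketch of the same policy, jittered exponential backoff that only re-attempts transient errors, it looks like the following; the helper name and the `is_transient` callback are illustrative stand-ins, not code from this PR:

```rust
use std::{future::Future, time::Duration};

use rand::Rng as _;

/// Simplified sketch of the dispatcher's retry policy: the backoff doubles on
/// every transient failure, is capped at 128 seconds, and gets +/-20% jitter so
/// that concurrent requests don't retry in lockstep.
async fn retry_with_backoff<T, E, Fut, F>(
    max_retries: u16,
    is_transient: impl Fn(&E) -> bool,
    mut f: F,
) -> Result<T, E>
where
    Fut: Future<Output = Result<T, E>>,
    F: FnMut() -> Fut,
{
    let mut attempt = 1u16;
    let mut backoff_secs = 1u64;
    loop {
        match f().await {
            Ok(value) => return Ok(value),
            // Permanent errors and an exhausted retry budget are surfaced immediately.
            Err(err) if !is_transient(&err) || attempt > max_retries => return Err(err),
            Err(_) => {
                let jitter: f64 = rand::thread_rng().gen_range(0.8..1.2);
                tokio::time::sleep(Duration::from_secs(backoff_secs).mul_f64(jitter)).await;
                backoff_secs = (backoff_secs * 2).min(128); // cap the back-off
                attempt += 1;
            }
        }
    }
}
```

The jitter matters for the singleton-but-accidentally-duplicated case the README describes: if two dispatcher instances do end up running, randomized sleeps keep them from hammering the DA layer at the same instants.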
diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs new file mode 100644 index 000000000000..80c030dff338 --- /dev/null +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -0,0 +1,211 @@ +use std::{future::Future, time::Duration}; + +use anyhow::Context; +use chrono::{NaiveDateTime, Utc}; +use rand::Rng; +use tokio::sync::watch::Receiver; +use zksync_config::DADispatcherConfig; +use zksync_da_client::{types::DAError, DataAvailabilityClient}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_types::L1BatchNumber; + +use crate::metrics::METRICS; + +#[derive(Debug)] +pub struct DataAvailabilityDispatcher { + client: Box, + pool: ConnectionPool, + config: DADispatcherConfig, +} + +impl DataAvailabilityDispatcher { + pub fn new( + pool: ConnectionPool, + config: DADispatcherConfig, + client: Box, + ) -> Self { + Self { + pool, + config, + client, + } + } + + pub async fn run(self, mut stop_receiver: Receiver) -> anyhow::Result<()> { + loop { + if *stop_receiver.borrow() { + break; + } + + let subtasks = futures::future::join( + async { + if let Err(err) = self.dispatch().await { + tracing::error!("dispatch error {err:?}"); + } + }, + async { + if let Err(err) = self.poll_for_inclusion().await { + tracing::error!("poll_for_inclusion error {err:?}"); + } + }, + ); + + tokio::select! { + _ = subtasks => {}, + _ = stop_receiver.changed() => { + break; + } + } + + if tokio::time::timeout(self.config.polling_interval(), stop_receiver.changed()) + .await + .is_ok() + { + break; + } + } + + tracing::info!("Stop signal received, da_dispatcher is shutting down"); + Ok(()) + } + + /// Dispatches the blobs to the data availability layer, and saves the blob_id in the database. + async fn dispatch(&self) -> anyhow::Result<()> { + let mut conn = self.pool.connection_tagged("da_dispatcher").await?; + let batches = conn + .data_availability_dal() + .get_ready_for_da_dispatch_l1_batches(self.config.max_rows_to_dispatch() as usize) + .await?; + drop(conn); + + for batch in batches { + let dispatch_latency = METRICS.blob_dispatch_latency.start(); + let dispatch_response = retry(self.config.max_retries(), batch.l1_batch_number, || { + self.client + .dispatch_blob(batch.l1_batch_number.0, batch.pubdata.clone()) + }) + .await + .with_context(|| { + format!( + "failed to dispatch a blob with batch_number: {}, pubdata_len: {}", + batch.l1_batch_number, + batch.pubdata.len() + ) + })?; + let dispatch_latency_duration = dispatch_latency.observe(); + + let sent_at = + NaiveDateTime::from_timestamp_millis(Utc::now().timestamp_millis()).unwrap(); + + let mut conn = self.pool.connection_tagged("da_dispatcher").await?; + conn.data_availability_dal() + .insert_l1_batch_da( + batch.l1_batch_number, + dispatch_response.blob_id.as_str(), + sent_at, + ) + .await?; + drop(conn); + + METRICS + .last_dispatched_l1_batch + .set(batch.l1_batch_number.0 as usize); + METRICS.blob_size.observe(batch.pubdata.len()); + tracing::info!( + "Dispatched a DA for batch_number: {}, pubdata_size: {}, dispatch_latency: {dispatch_latency_duration:?}", + batch.l1_batch_number, + batch.pubdata.len(), + ); + } + + Ok(()) + } + + /// Polls the data availability layer for inclusion data, and saves it in the database. 
+ async fn poll_for_inclusion(&self) -> anyhow::Result<()> { + let mut conn = self.pool.connection_tagged("da_dispatcher").await?; + let blob_info = conn + .data_availability_dal() + .get_first_da_blob_awaiting_inclusion() + .await?; + drop(conn); + + let Some(blob_info) = blob_info else { + return Ok(()); + }; + + let inclusion_data = self + .client + .get_inclusion_data(blob_info.blob_id.as_str()) + .await + .with_context(|| { + format!( + "failed to get inclusion data for blob_id: {}, batch_number: {}", + blob_info.blob_id, blob_info.l1_batch_number + ) + })?; + + let Some(inclusion_data) = inclusion_data else { + return Ok(()); + }; + + let mut conn = self.pool.connection_tagged("da_dispatcher").await?; + conn.data_availability_dal() + .save_l1_batch_inclusion_data( + L1BatchNumber(blob_info.l1_batch_number.0), + inclusion_data.data.as_slice(), + ) + .await?; + drop(conn); + + let inclusion_latency = Utc::now().signed_duration_since(blob_info.sent_at); + if let Ok(latency) = inclusion_latency.to_std() { + METRICS.inclusion_latency.observe(latency); + } + METRICS + .last_included_l1_batch + .set(blob_info.l1_batch_number.0 as usize); + + tracing::info!( + "Received an inclusion data for a batch_number: {}, inclusion_latency_seconds: {}", + blob_info.l1_batch_number, + inclusion_latency.num_seconds() + ); + + Ok(()) + } +} + +async fn retry( + max_retries: u16, + batch_number: L1BatchNumber, + mut f: F, +) -> Result +where + Fut: Future>, + F: FnMut() -> Fut, +{ + let mut retries = 1; + let mut backoff_secs = 1; + loop { + match f().await { + Ok(result) => { + METRICS.dispatch_call_retries.observe(retries as usize); + return Ok(result); + } + Err(err) => { + if !err.is_transient() || retries > max_retries { + return Err(err); + } + + retries += 1; + let sleep_duration = Duration::from_secs(backoff_secs) + .mul_f32(rand::thread_rng().gen_range(0.8..1.2)); + tracing::warn!(%err, "Failed DA dispatch request {retries}/{max_retries} for batch {batch_number}, retrying in {} milliseconds.", sleep_duration.as_millis()); + tokio::time::sleep(sleep_duration).await; + + backoff_secs = (backoff_secs * 2).min(128); // cap the back-off at 128 seconds + } + } + } +} diff --git a/core/node/da_dispatcher/src/lib.rs b/core/node/da_dispatcher/src/lib.rs new file mode 100644 index 000000000000..cb41ea1f7c25 --- /dev/null +++ b/core/node/da_dispatcher/src/lib.rs @@ -0,0 +1,4 @@ +pub use self::da_dispatcher::DataAvailabilityDispatcher; + +mod da_dispatcher; +mod metrics; diff --git a/core/node/da_dispatcher/src/metrics.rs b/core/node/da_dispatcher/src/metrics.rs new file mode 100644 index 000000000000..67ac5ed68222 --- /dev/null +++ b/core/node/da_dispatcher/src/metrics.rs @@ -0,0 +1,33 @@ +use std::time::Duration; + +use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; + +/// Buckets for `blob_dispatch_latency` (from 0.1 to 120 seconds). +const DISPATCH_LATENCIES: Buckets = + Buckets::values(&[0.1, 0.5, 1.0, 2.0, 5.0, 10.0, 30.0, 60.0, 120.0]); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "server_da_dispatcher")] +pub(super) struct DataAvailabilityDispatcherMetrics { + /// Latency of the dispatch of the blob. + #[metrics(buckets = DISPATCH_LATENCIES, unit = Unit::Seconds)] + pub blob_dispatch_latency: Histogram, + /// The duration between the moment when the blob is dispatched and the moment when it is included. + #[metrics(buckets = Buckets::LATENCIES)] + pub inclusion_latency: Histogram, + /// Size of the dispatched blob. 
+    /// Buckets are bytes ranging from 1 KB to 16 MB, which has to cover all blob size values.
+    #[metrics(buckets = Buckets::exponential(1_024.0..=16.0 * 1_024.0 * 1_024.0, 2.0), unit = Unit::Bytes)]
+    pub blob_size: Histogram<usize>,
+
+    /// Number of transactions resent by the DA dispatcher.
+    #[metrics(buckets = Buckets::linear(0.0..=10.0, 1.0))]
+    pub dispatch_call_retries: Histogram<usize>,
+    /// Last L1 batch that was dispatched to the DA layer.
+    pub last_dispatched_l1_batch: Gauge<usize>,
+    /// Last L1 batch that has its inclusion finalized by the DA layer.
+    pub last_included_l1_batch: Gauge<usize>,
+}
+
+#[vise::register]
+pub(super) static METRICS: vise::Global<DataAvailabilityDispatcherMetrics> = vise::Global::new();
diff --git a/core/node/db_pruner/Cargo.toml b/core/node/db_pruner/Cargo.toml
index d56d9fb4df55..eb21e3e476db 100644
--- a/core/node/db_pruner/Cargo.toml
+++ b/core/node/db_pruner/Cargo.toml
@@ -1,5 +1,6 @@
 [package]
 name = "zksync_node_db_pruner"
+description = "ZKsync database pruner"
 version.workspace = true
 edition.workspace = true
 authors.workspace = true
diff --git a/core/node/db_pruner/README.md b/core/node/db_pruner/README.md
index 4ae0b848b3d1..ee1317d01e46 100644
--- a/core/node/db_pruner/README.md
+++ b/core/node/db_pruner/README.md
@@ -3,15 +3,20 @@
 Database pruner is a component that regularly removes the oldest l1 batches from the database together with
 corresponding L2 blocks, events, etc.
 
-**There are two types of objects that are not fully cleaned:**
+There are two types of objects that are not fully cleaned:
 
-**Transactions** - Transactions only have BYTEA fields cleaned as many of other components rely on transactions
-existence.
+- **Transactions** only have `BYTEA` fields cleaned, as some components rely on the transactions' existence.
+- **Storage logs:** only storage logs that have been overwritten are removed
 
-**Storage logs** - We only remove storage logs that have been overwritten
+## Pruning workflow
 
-### Soft and Hard pruning
+_(See [node docs](../../../docs/guides/external-node/08_pruning.md) for a high-level pruning overview)_
 
-There are two 'phases' of pruning an L1 batch, soft pruning and hard pruning. Every batch that would have it's records
-removed if first soft pruned. Soft pruned batches can't safely be used. One minute (this is configurable) after soft
-pruning, hard pruning is performed, where hard means physically removing those batches from the database
+There are two phases of pruning an L1 batch: soft pruning and hard pruning. Every batch that would have its records
+removed is first _soft-pruned_. Soft-pruned batches cannot safely be used. One minute (this is configurable) after soft
+pruning, _hard pruning_ is performed, where hard means physically removing data from the database.
+
+The reasoning behind this split is to allow node components such as the API server to become aware of planned data
+pruning, and restrict access to the pruned data in advance. This ensures that data does not unexpectedly (from the
+component perspective) disappear from Postgres in the middle of an operation (like serving a Web3 request). At least in
+some cases, like VM-related Web3 methods, we cannot rely on database transactions for this purpose.
diff --git a/core/node/db_pruner/src/lib.rs b/core/node/db_pruner/src/lib.rs
index 22a1e4453614..4b4a53c68aa0 100644
--- a/core/node/db_pruner/src/lib.rs
+++ b/core/node/db_pruner/src/lib.rs
@@ -1,6 +1,9 @@
 //! Postgres pruning component.
-use std::{sync::Arc, time::Duration}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; use anyhow::Context as _; use serde::{Deserialize, Serialize}; @@ -10,7 +13,7 @@ use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthChe use zksync_types::{L1BatchNumber, L2BlockNumber}; use self::{ - metrics::{MetricPruneType, METRICS}, + metrics::{ConditionOutcome, PruneType, METRICS}, prune_conditions::{ ConsistencyCheckerProcessedBatch, L1BatchExistsCondition, L1BatchOlderThanPruneCondition, NextL1BatchHasMetadataCondition, NextL1BatchWasExecutedCondition, PruneCondition, @@ -128,15 +131,24 @@ impl DbPruner { let mut errored_conditions = vec![]; for condition in &self.prune_conditions { - match condition.is_batch_prunable(l1_batch_number).await { - Ok(true) => successful_conditions.push(condition.to_string()), - Ok(false) => failed_conditions.push(condition.to_string()), + let outcome = match condition.is_batch_prunable(l1_batch_number).await { + Ok(true) => { + successful_conditions.push(condition.to_string()); + ConditionOutcome::Success + } + Ok(false) => { + failed_conditions.push(condition.to_string()); + ConditionOutcome::Fail + } Err(error) => { errored_conditions.push(condition.to_string()); tracing::warn!("Pruning condition '{condition}' resulted in an error: {error}"); + ConditionOutcome::Error } - } + }; + METRICS.observe_condition(condition.as_ref(), outcome); } + let result = failed_conditions.is_empty() && errored_conditions.is_empty(); if !result { tracing::debug!( @@ -172,7 +184,7 @@ impl DbPruner { } async fn soft_prune(&self, storage: &mut Connection<'_, Core>) -> anyhow::Result { - let latency = METRICS.pruning_chunk_duration[&MetricPruneType::Soft].start(); + let start = Instant::now(); let mut transaction = storage.start_transaction().await?; let mut current_pruning_info = transaction.pruning_dal().get_pruning_info().await?; @@ -184,7 +196,7 @@ impl DbPruner { + self.config.pruned_batch_chunk_size, ); if !self.is_l1_batch_prunable(next_l1_batch_to_prune).await { - latency.observe(); + METRICS.pruning_chunk_duration[&PruneType::NoOp].observe(start.elapsed()); return Ok(false); } @@ -200,7 +212,8 @@ impl DbPruner { transaction.commit().await?; - let latency = latency.observe(); + let latency = start.elapsed(); + METRICS.pruning_chunk_duration[&PruneType::Soft].observe(latency); tracing::info!( "Soft pruned db l1_batches up to {next_l1_batch_to_prune} and L2 blocks up to {next_l2_block_to_prune}, operation took {latency:?}", ); @@ -216,7 +229,7 @@ impl DbPruner { storage: &mut Connection<'_, Core>, stop_receiver: &mut watch::Receiver, ) -> anyhow::Result { - let latency = METRICS.pruning_chunk_duration[&MetricPruneType::Hard].start(); + let latency = METRICS.pruning_chunk_duration[&PruneType::Hard].start(); let mut transaction = storage.start_transaction().await?; let mut current_pruning_info = transaction.pruning_dal().get_pruning_info().await?; diff --git a/core/node/db_pruner/src/metrics.rs b/core/node/db_pruner/src/metrics.rs index 0d4d88513dbc..2833bc97f9c1 100644 --- a/core/node/db_pruner/src/metrics.rs +++ b/core/node/db_pruner/src/metrics.rs @@ -1,11 +1,16 @@ use std::time::Duration; -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit}; +use vise::{ + Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit, +}; use zksync_dal::pruning_dal::HardPruningStats; +use crate::prune_conditions::PruneCondition; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, 
EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "prune_type", rename_all = "snake_case")] -pub(super) enum MetricPruneType { +pub(super) enum PruneType { + NoOp, Soft, Hard, } @@ -21,8 +26,23 @@ enum PrunedEntityType { CallTrace, } +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue)] +#[metrics(rename_all = "snake_case")] +pub(crate) enum ConditionOutcome { + Success, + Fail, + Error, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] +struct ConditionOutcomeLabels { + condition: &'static str, + outcome: ConditionOutcome, +} + const ENTITY_COUNT_BUCKETS: Buckets = Buckets::values(&[ 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1_000.0, 2_000.0, 5_000.0, 10_000.0, + 20_000.0, 50_000.0, 100_000.0, ]); #[derive(Debug, Metrics)] @@ -30,12 +50,14 @@ const ENTITY_COUNT_BUCKETS: Buckets = Buckets::values(&[ pub(super) struct DbPrunerMetrics { /// Total latency of pruning chunk of L1 batches. #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] - pub pruning_chunk_duration: Family>, + pub pruning_chunk_duration: Family>, /// Number of not-pruned L1 batches. pub not_pruned_l1_batches_count: Gauge, /// Number of entities deleted during a single hard pruning iteration, grouped by entity type. #[metrics(buckets = ENTITY_COUNT_BUCKETS)] deleted_entities: Family>, + /// Number of times a certain condition has resulted in a specific outcome (succeeded, failed, or errored). + condition_outcomes: Family, } impl DbPrunerMetrics { @@ -61,6 +83,14 @@ impl DbPrunerMetrics { self.deleted_entities[&PrunedEntityType::L2ToL1Log].observe(deleted_l2_to_l1_logs); self.deleted_entities[&PrunedEntityType::CallTrace].observe(deleted_call_traces); } + + pub fn observe_condition(&self, condition: &dyn PruneCondition, outcome: ConditionOutcome) { + let labels = ConditionOutcomeLabels { + condition: condition.metric_label(), + outcome, + }; + self.condition_outcomes[&labels].inc(); + } } #[vise::register] diff --git a/core/node/db_pruner/src/prune_conditions.rs b/core/node/db_pruner/src/prune_conditions.rs index fef6b57f3352..42f225f4a44b 100644 --- a/core/node/db_pruner/src/prune_conditions.rs +++ b/core/node/db_pruner/src/prune_conditions.rs @@ -7,6 +7,8 @@ use zksync_types::L1BatchNumber; #[async_trait] pub(crate) trait PruneCondition: fmt::Debug + fmt::Display + Send + Sync + 'static { + fn metric_label(&self) -> &'static str; + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result; } @@ -24,6 +26,10 @@ impl fmt::Display for L1BatchOlderThanPruneCondition { #[async_trait] impl PruneCondition for L1BatchOlderThanPruneCondition { + fn metric_label(&self) -> &'static str { + "l1_batch_older_than_minimum_age" + } + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result { let mut storage = self.pool.connection_tagged("db_pruner").await?; let l1_batch_header = storage @@ -50,6 +56,10 @@ impl fmt::Display for NextL1BatchWasExecutedCondition { #[async_trait] impl PruneCondition for NextL1BatchWasExecutedCondition { + fn metric_label(&self) -> &'static str { + "next_l1_batch_was_executed" + } + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result { let mut storage = self.pool.connection_tagged("db_pruner").await?; let next_l1_batch_number = L1BatchNumber(l1_batch_number.0 + 1); @@ -76,6 +86,10 @@ impl fmt::Display for NextL1BatchHasMetadataCondition { #[async_trait] impl PruneCondition for NextL1BatchHasMetadataCondition { + fn metric_label(&self) -> &'static str { + 
"next_l1_batch_has_metadata" + } + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result { let mut storage = self.pool.connection_tagged("db_pruner").await?; let next_l1_batch_number = L1BatchNumber(l1_batch_number.0 + 1); @@ -117,6 +131,10 @@ impl fmt::Display for L1BatchExistsCondition { #[async_trait] impl PruneCondition for L1BatchExistsCondition { + fn metric_label(&self) -> &'static str { + "l1_batch_exists" + } + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result { let mut storage = self.pool.connection_tagged("db_pruner").await?; let l1_batch_header = storage @@ -140,6 +158,10 @@ impl fmt::Display for ConsistencyCheckerProcessedBatch { #[async_trait] impl PruneCondition for ConsistencyCheckerProcessedBatch { + fn metric_label(&self) -> &'static str { + "l1_batch_consistency_checked" + } + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result { let mut storage = self.pool.connection_tagged("db_pruner").await?; let last_processed_l1_batch = storage diff --git a/core/node/db_pruner/src/tests.rs b/core/node/db_pruner/src/tests.rs index 9a962d518ec3..d4dbe4546035 100644 --- a/core/node/db_pruner/src/tests.rs +++ b/core/node/db_pruner/src/tests.rs @@ -47,10 +47,14 @@ impl fmt::Display for ConditionMock { #[async_trait] impl PruneCondition for ConditionMock { + fn metric_label(&self) -> &'static str { + "mock" + } + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result { self.is_batch_prunable_responses .get(&l1_batch_number) - .cloned() + .copied() .context("error!") } } diff --git a/core/node/eth_sender/Cargo.toml b/core/node/eth_sender/Cargo.toml index c957ae2ce46b..4f2b27ff1d9f 100644 --- a/core/node/eth_sender/Cargo.toml +++ b/core/node/eth_sender/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "zksync_eth_sender" +description = "ZKsync Ethereum sender" version.workspace = true edition.workspace = true authors.workspace = true diff --git a/core/node/eth_sender/src/abstract_l1_interface.rs b/core/node/eth_sender/src/abstract_l1_interface.rs index e9290df2eb14..acc7c265186d 100644 --- a/core/node/eth_sender/src/abstract_l1_interface.rs +++ b/core/node/eth_sender/src/abstract_l1_interface.rs @@ -1,6 +1,7 @@ use std::fmt; use async_trait::async_trait; +use vise::{EncodeLabelSet, EncodeLabelValue}; use zksync_eth_client::{ clients::{DynClient, L1}, BoundEthInterface, EnrichedClientResult, EthInterface, ExecutedTxStatus, FailureInfo, Options, @@ -32,6 +33,13 @@ pub(crate) struct L1BlockNumbers { pub latest: L1BlockNumber, } +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] +#[metrics(label = "type", rename_all = "snake_case")] +pub(crate) enum OperatorType { + NonBlob, + Blob, +} + #[async_trait] pub(super) trait AbstractL1Interface: 'static + Sync + Send + fmt::Debug { async fn failure_reason(&self, tx_hash: H256) -> Option; @@ -51,11 +59,7 @@ pub(super) trait AbstractL1Interface: 'static + Sync + Send + fmt::Debug { async fn get_operator_nonce( &self, block_numbers: L1BlockNumbers, - ) -> Result; - - async fn get_blobs_operator_nonce( - &self, - block_numbers: L1BlockNumbers, + operator_type: OperatorType, ) -> Result, EthSenderError>; async fn sign_tx( @@ -122,28 +126,13 @@ impl AbstractL1Interface for RealL1Interface { async fn get_operator_nonce( &self, block_numbers: L1BlockNumbers, - ) -> Result { - let finalized = self - .ethereum_gateway() - .nonce_at(block_numbers.finalized.0.into()) - .await? 
-            .as_u32()
-            .into();
-
-        let latest = self
-            .ethereum_gateway()
-            .nonce_at(block_numbers.latest.0.into())
-            .await?
-            .as_u32()
-            .into();
-        Ok(OperatorNonce { finalized, latest })
-    }
-
-    async fn get_blobs_operator_nonce(
-        &self,
-        block_numbers: L1BlockNumbers,
+        operator_type: OperatorType,
     ) -> Result<Option<OperatorNonce>, EthSenderError> {
-        match &self.ethereum_gateway_blobs() {
+        let gateway = match operator_type {
+            OperatorType::NonBlob => Some(self.ethereum_gateway()),
+            OperatorType::Blob => self.ethereum_gateway_blobs(),
+        };
+        match gateway {
             None => Ok(None),
             Some(gateway) => {
                 let finalized = gateway
diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs
index 966c9d1f1907..de6a6982088b 100644
--- a/core/node/eth_sender/src/aggregator.rs
+++ b/core/node/eth_sender/src/aggregator.rs
@@ -216,6 +216,7 @@ impl Aggregator {
                     base_system_contracts_hashes.bootloader,
                     base_system_contracts_hashes.default_aa,
                     protocol_version_id,
+                    self.commitment_mode != L1BatchCommitmentMode::Rollup,
                 )
                 .await
                 .unwrap()
diff --git a/core/node/eth_sender/src/error.rs b/core/node/eth_sender/src/error.rs
index 61d92bcbe132..ed4fdaaec25a 100644
--- a/core/node/eth_sender/src/error.rs
+++ b/core/node/eth_sender/src/error.rs
@@ -10,3 +10,12 @@ pub enum EthSenderError {
     #[error("Token parsing error: {0}")]
     Parse(#[from] contract::Error),
 }
+
+impl EthSenderError {
+    pub fn is_transient(&self) -> bool {
+        match self {
+            EthSenderError::EthereumGateway(err) => err.is_transient(),
+            _ => false,
+        }
+    }
+}
diff --git a/core/node/eth_sender/src/eth_fees_oracle.rs b/core/node/eth_sender/src/eth_fees_oracle.rs
index 431ef4c8856b..c985a987eeb5 100644
--- a/core/node/eth_sender/src/eth_fees_oracle.rs
+++ b/core/node/eth_sender/src/eth_fees_oracle.rs
@@ -37,9 +37,9 @@ impl GasAdjusterFeesOracle {
         &self,
         previous_sent_tx: &Option<TxHistory>,
     ) -> Result<EthFees, EthSenderError> {
-        let base_fee_per_gas = self.gas_adjuster.get_base_fee(0);
-        let priority_fee_per_gas = self.gas_adjuster.get_priority_fee();
-        let blob_base_fee_per_gas = Some(self.gas_adjuster.get_blob_base_fee());
+        let base_fee_per_gas = self.gas_adjuster.get_blob_tx_base_fee();
+        let priority_fee_per_gas = self.gas_adjuster.get_blob_tx_priority_fee();
+        let blob_base_fee_per_gas = Some(self.gas_adjuster.get_blob_tx_blob_base_fee());
 
         if let Some(previous_sent_tx) = previous_sent_tx {
             // for blob transactions on re-sending we need to double all gas prices
@@ -67,7 +67,7 @@ impl GasAdjusterFeesOracle {
         previous_sent_tx: &Option<TxHistory>,
         time_in_mempool: u32,
     ) -> Result<EthFees, EthSenderError> {
-        let base_fee_per_gas = self.gas_adjuster.get_base_fee(time_in_mempool);
+        let mut base_fee_per_gas = self.gas_adjuster.get_base_fee(time_in_mempool);
         if let Some(previous_sent_tx) = previous_sent_tx {
             self.verify_base_fee_not_too_low_on_resend(
                 previous_sent_tx.id,
@@ -84,6 +84,12 @@ impl GasAdjusterFeesOracle {
                 priority_fee_per_gas,
                 (previous_sent_tx.priority_fee_per_gas * 6) / 5 + 1,
             );
+
+            // same for base_fee_per_gas, but with a 10% bump
+            base_fee_per_gas = max(
+                base_fee_per_gas,
+                previous_sent_tx.base_fee_per_gas + (previous_sent_tx.base_fee_per_gas / 10) + 1,
+            );
         }
 
         // Extra check to prevent sending a transaction with an extremely high priority fee.
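The added base-fee bump mirrors the existing priority-fee rule above: a replacement transaction is only accepted if its fees strictly exceed the previous attempt, so each resend raises the priority fee by at least 20% and the base fee by at least 10%. A minimal illustration of the arithmetic (hypothetical helper, not code from this PR):

```rust
use std::cmp::max;

/// Sketch of the resend bump rule: take the market estimate, but never less
/// than the previous attempt bumped by 20% (priority fee) or 10% (base fee).
/// The trailing `+ 1` guarantees a strict increase even when the percentage
/// bump rounds down to zero in integer math.
fn bumped_fees(
    suggested_base_fee: u64,
    suggested_priority_fee: u64,
    prev_base_fee: u64,
    prev_priority_fee: u64,
) -> (u64, u64) {
    let priority_fee = max(suggested_priority_fee, (prev_priority_fee * 6) / 5 + 1);
    let base_fee = max(suggested_base_fee, prev_base_fee + prev_base_fee / 10 + 1);
    (base_fee, priority_fee)
}

#[test]
fn bump_is_strict() {
    // With a previous base fee of 100 and a market estimate of only 90,
    // the resend still uses 100 + 10 + 1 = 111; the priority fee becomes
    // 50 * 6 / 5 + 1 = 61 even though the market suggests 50.
    assert_eq!(bumped_fees(90, 50, 100, 50), (111, 61));
}
```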
diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index f635d12bae13..d2ee4380d68b 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -14,7 +14,9 @@ use zksync_utils::time::seconds_since_epoch; use super::{metrics::METRICS, EthSenderError}; use crate::{ - abstract_l1_interface::{AbstractL1Interface, L1BlockNumbers, OperatorNonce, RealL1Interface}, + abstract_l1_interface::{ + AbstractL1Interface, L1BlockNumbers, OperatorNonce, OperatorType, RealL1Interface, + }, eth_fees_oracle::{EthFees, EthFeesOracle, GasAdjusterFeesOracle}, metrics::TransactionType, }; @@ -68,7 +70,7 @@ impl EthTxManager { &self, storage: &mut Connection<'_, Core>, op: &EthTx, - ) -> Option { + ) -> Result, EthSenderError> { // Checking history items, starting from most recently sent. for history_item in storage .eth_sender_dal() @@ -80,16 +82,19 @@ impl EthTxManager { // because if we do and get an `Err`, we won't finish the for loop, // which means we might miss the transaction that actually succeeded. match self.l1_interface.get_tx_status(history_item.tx_hash).await { - Ok(Some(s)) => return Some(s), + Ok(Some(s)) => return Ok(Some(s)), Ok(_) => continue, - Err(err) => tracing::warn!( - "Can't check transaction {:?}: {:?}", - history_item.tx_hash, - err - ), + Err(err) => { + tracing::warn!( + "Can't check transaction {:?}: {:?}", + history_item.tx_hash, + err + ); + return Err(err); + } } } - None + Ok(None) } pub(crate) async fn send_eth_tx( @@ -229,48 +234,48 @@ impl EthTxManager { .remove_tx_history(tx_history_id) .await .unwrap(); + } else { + METRICS.l1_transient_errors.inc(); } Err(error.into()) } } } + pub(crate) fn operator_address(&self, operator_type: OperatorType) -> Option
{ + if operator_type == OperatorType::NonBlob { + None + } else { + self.l1_interface.get_blobs_operator_account() + } + } // Monitors the in-flight transactions, marks mined ones as confirmed, // returns the one that has to be resent (if there is one). - pub(super) async fn monitor_inflight_transactions( + pub(super) async fn monitor_inflight_transactions_single_operator( &mut self, storage: &mut Connection<'_, Core>, l1_block_numbers: L1BlockNumbers, + operator_type: OperatorType, ) -> Result, EthSenderError> { - METRICS.track_block_numbers(&l1_block_numbers); let operator_nonce = self .l1_interface - .get_operator_nonce(l1_block_numbers) + .get_operator_nonce(l1_block_numbers, operator_type) .await?; - let blobs_operator_nonce = self - .l1_interface - .get_blobs_operator_nonce(l1_block_numbers) - .await?; - let blobs_operator_address = self.l1_interface.get_blobs_operator_account(); - if let Some(res) = self - .monitor_inflight_transactions_inner(storage, l1_block_numbers, operator_nonce, None) - .await? - { - return Ok(Some(res)); - }; + if let Some(operator_nonce) = operator_nonce { + let inflight_txs = storage + .eth_sender_dal() + .get_inflight_txs(self.operator_address(operator_type)) + .await + .unwrap(); + METRICS.number_of_inflight_txs[&operator_type].set(inflight_txs.len()); - if let Some(blobs_operator_nonce) = blobs_operator_nonce { - // need to check if both nonce and address are `Some` - if blobs_operator_address.is_none() { - panic!("blobs_operator_address has to be set its nonce is known; qed"); - } Ok(self - .monitor_inflight_transactions_inner( + .apply_inflight_txs_statuses_and_get_first_to_resend( storage, l1_block_numbers, - blobs_operator_nonce, - blobs_operator_address, + operator_nonce, + inflight_txs, ) .await?) } else { @@ -278,16 +283,13 @@ impl EthTxManager { } } - async fn monitor_inflight_transactions_inner( + async fn apply_inflight_txs_statuses_and_get_first_to_resend( &mut self, storage: &mut Connection<'_, Core>, l1_block_numbers: L1BlockNumbers, operator_nonce: OperatorNonce, - operator_address: Option
<Address>,
+ inflight_txs: Vec<EthTx>,
) -> Result<Option<(EthTx, u32)>, EthSenderError> {
- let inflight_txs = storage.eth_sender_dal().get_inflight_txs().await.unwrap();
- METRICS.number_of_inflight_txs.set(inflight_txs.len());
-
tracing::trace!(
"Going through not confirmed txs. \
Block numbers: latest {}, finalized {}, \
@@ -307,10 +309,6 @@ impl EthTxManager {
tx.nonce,
);
- if tx.from_addr != operator_address {
- continue;
- }
-
// If the `operator_nonce.latest` <= `tx.nonce`, this means
// that `tx` is not mined and we should resend it.
// We only resend the first un-mined transaction.
@@ -346,12 +344,18 @@ impl EthTxManager {
tx.nonce,
);
+ tracing::info!(
+ "Updating status of tx {} of type {} with nonce {}",
+ tx.id,
+ tx.tx_type,
+ tx.nonce
+ );
match self.check_all_sending_attempts(storage, &tx).await {
- Some(tx_status) => {
+ Ok(Some(tx_status)) => {
self.apply_tx_status(storage, &tx, tx_status, l1_block_numbers.finalized)
.await;
}
- None => {
+ Ok(None) => {
// The nonce has increased but we did not find the receipt.
// This is an error because such a big re-org may cause transactions that were
// previously recorded as confirmed to become pending again and we have to
@@ -361,6 +365,13 @@ impl EthTxManager {
&tx
);
}
+ Err(err) => {
+ // An error here means that we weren't able to check the status of one of the txs.
+ // We can't continue, in order to avoid out-of-order confirmed txs
+ // (for instance, an Execute tx confirmed before the PublishProof tx), as that would
+ // make our API return inconsistent block info.
+ return Err(err);
+ }
}
}
Ok(None)
@@ -535,16 +546,13 @@ impl EthTxManager {
tracing::info!("Stop signal received, eth_tx_manager is shutting down");
break;
}
+ let l1_block_numbers = self.l1_interface.get_l1_block_numbers().await?;
+ METRICS.track_block_numbers(&l1_block_numbers);
- match self.loop_iteration(&mut storage, last_known_l1_block).await {
- Ok(block) => last_known_l1_block = block,
- Err(e) => {
- // Web3 API request failures can cause this,
- // and anything more important is already properly reported.
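To make the resend rule in `apply_inflight_txs_statuses_and_get_first_to_resend` easier to follow, here is a minimal sketch under simplified, hypothetical types (the real code works on `EthTx` and `OperatorNonce`): an operator's transactions are strictly nonce-ordered, so only the first tx whose nonce has not yet been consumed on L1 can need resending.

```rust
struct InflightTx {
    nonce: u64,
}

/// Returns the first in-flight tx that is not mined yet. Later txs cannot be mined
/// before it anyway, so it is the only one worth resending.
fn first_tx_to_resend(latest_operator_nonce: u64, inflight: &[InflightTx]) -> Option<&InflightTx> {
    inflight.iter().find(|tx| tx.nonce >= latest_operator_nonce)
}
```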
- tracing::warn!("eth_sender error {:?}", e);
- }
+ if last_known_l1_block < l1_block_numbers.latest {
+ self.loop_iteration(&mut storage, l1_block_numbers).await;
+ last_known_l1_block = l1_block_numbers.latest;
}
- tokio::time::sleep(self.config.tx_poll_period()).await;
}
Ok(())
}
@@ -554,10 +562,11 @@
&mut self,
storage: &mut Connection<'_, Core>,
current_block: L1BlockNumber,
+ operator_type: OperatorType,
) {
let number_inflight_txs = storage
.eth_sender_dal()
- .get_inflight_txs()
+ .get_inflight_txs(self.operator_address(operator_type))
.await
.unwrap()
.len();
@@ -570,34 +579,42 @@ impl EthTxManager {
// Get the new eth tx and create history item for them
let new_eth_tx = storage
.eth_sender_dal()
- .get_new_eth_txs(number_of_available_slots_for_eth_txs)
+ .get_new_eth_txs(
+ number_of_available_slots_for_eth_txs,
+ &self.operator_address(operator_type),
+ )
.await
.unwrap();
+ if !new_eth_tx.is_empty() {
+ tracing::info!(
+ "Sending {} {operator_type:?} new transactions",
+ new_eth_tx.len()
+ );
+ } else {
+ tracing::trace!("No new transactions to send");
+ }
for tx in new_eth_tx {
- let _ = self.send_eth_tx(storage, &tx, 0, current_block).await;
+ let result = self.send_eth_tx(storage, &tx, 0, current_block).await;
+ // If one of the transactions fails, we should stop and return:
+ // new transactions have increasing nonces, so they would also fail
+ // with an error about gapped nonces
+ if result.is_err() {
+ tracing::info!("Skipping sending the rest of the new transactions because of an error");
+ break;
+ }
}
}
}
- #[tracing::instrument(skip(self, storage))]
- async fn loop_iteration(
+ async fn update_statuses_and_resend_if_needed(
&mut self,
storage: &mut Connection<'_, Core>,
- previous_block: L1BlockNumber,
- ) -> Result {
- let l1_block_numbers = self.l1_interface.get_l1_block_numbers().await?;
-
- self.send_new_eth_txs(storage, l1_block_numbers.latest)
- .await;
-
- if l1_block_numbers.latest <= previous_block {
- // Nothing to do - no new blocks were mined.
- return Ok(previous_block);
- }
-
+ l1_block_numbers: L1BlockNumbers,
+ operator_type: OperatorType,
+ ) -> Result<(), EthSenderError> {
if let Some((tx, sent_at_block)) = self
- .monitor_inflight_transactions(storage, l1_block_numbers)
+ .monitor_inflight_transactions_single_operator(storage, l1_block_numbers, operator_type)
+ .await?
{
// New gas price depends on the time this tx spent in mempool.
@@ -608,9 +625,37 @@ impl EthTxManager {
// sending new operations.
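The reworked run loop above gates the work on a fresh L1 block. A hedged sketch of the same pattern in isolation (`fetch_latest_l1_block`, `do_iteration`, and the sleep-based throttling are illustrative stand-ins for the real `EthTxManager` plumbing; assumes the `tokio` and `anyhow` crates):

```rust
use std::time::Duration;

async fn fetch_latest_l1_block() -> anyhow::Result<u64> {
    Ok(42) // stub: the real code asks the L1 client for block numbers
}

async fn do_iteration(latest_block: u64) {
    println!("processing up to L1 block {latest_block}"); // stub for `loop_iteration`
}

async fn run_loop(poll_period: Duration) -> anyhow::Result<()> {
    let mut last_known_l1_block = 0u64;
    loop {
        let latest = fetch_latest_l1_block().await?;
        if last_known_l1_block < latest {
            // Work happens at most once per new L1 block, not on every poll tick.
            do_iteration(latest).await;
            last_known_l1_block = latest;
        }
        tokio::time::sleep(poll_period).await;
    }
}
```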
let _ = self
.send_eth_tx(storage, &tx, time_in_mempool, l1_block_numbers.latest)
- .await;
+ .await?;
}
+ Ok(())
+ }
+
+ #[tracing::instrument(skip(self, storage))]
+ async fn loop_iteration(
+ &mut self,
+ storage: &mut Connection<'_, Core>,
+ l1_block_numbers: L1BlockNumbers,
+ ) {
+ tracing::trace!("Loop iteration at block {}", l1_block_numbers.latest);
+ // We can treat these two operators independently, as they have different nonces, and the
+ // aggregator makes sure that the corresponding Commit transaction is confirmed before
+ // creating a PublishProof transaction
+ for operator_type in [OperatorType::NonBlob, OperatorType::Blob] {
+ self.send_new_eth_txs(storage, l1_block_numbers.latest, operator_type)
+ .await;
+ let result = self
+ .update_statuses_and_resend_if_needed(storage, l1_block_numbers, operator_type)
+ .await;
- Ok(l1_block_numbers.latest)
+ // We don't want an error in sending non-blob transactions to interrupt sending blob txs
+ if let Err(error) = result {
+ // Web3 API request failures can cause this,
+ // and anything more important is already properly reported.
+ tracing::warn!("eth_sender error {:?}", error);
+ if error.is_transient() {
+ METRICS.l1_transient_errors.inc();
+ }
+ }
+ }
}
}
diff --git a/core/node/eth_sender/src/metrics.rs b/core/node/eth_sender/src/metrics.rs index dfebcc278b7e..462fe3ed6e59 100644 --- a/core/node/eth_sender/src/metrics.rs +++ b/core/node/eth_sender/src/metrics.rs
@@ -8,7 +8,7 @@
use zksync_shared_metrics::{BlockL1Stage, BlockStage, APP_METRICS};
use zksync_types::{aggregated_operations::AggregatedActionType, eth_sender::EthTx};
use zksync_utils::time::seconds_since_epoch;
-use crate::abstract_l1_interface::L1BlockNumbers;
+use crate::abstract_l1_interface::{L1BlockNumbers, OperatorType};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)]
#[metrics(label = "kind", rename_all = "snake_case")]
@@ -98,7 +98,7 @@ pub(super) struct EthSenderMetrics {
/// Last L1 block observed by the Ethereum sender.
pub last_known_l1_block: Family>,
/// Number of in-flight txs produced by the Ethereum sender.
- pub number_of_inflight_txs: Gauge,
+ pub number_of_inflight_txs: Family>,
#[metrics(buckets = GAS_BUCKETS)]
pub l1_gas_used: Family>,
#[metrics(buckets = Buckets::LATENCIES)]
@@ -107,6 +107,7 @@
pub l1_blocks_waited_in_mempool: Family>,
/// Number of L1 batches aggregated for publishing with a specific reason.
pub block_aggregation_reason: Family,
+ pub l1_transient_errors: Counter,
}
impl EthSenderMetrics {
@@ -130,24 +131,25 @@ impl EthSenderMetrics {
tx_type: tx.tx_type,
};
- let l1_batch_headers = connection
+ let l1_batches_statistics = connection
.blocks_dal()
- .get_l1_batches_for_eth_tx_id(tx.id)
+ .get_l1_batches_statistics_for_eth_tx_id(tx.id)
.await
.unwrap();
// This should be only the case when some blocks were reverted.
- if l1_batch_headers.is_empty() { + if l1_batches_statistics.is_empty() { tracing::warn!("No L1 batches were found for eth_tx with id = {}", tx.id); return; } - for header in l1_batch_headers { + for statistics in l1_batches_statistics { APP_METRICS.block_latency[&stage].observe(Duration::from_secs( - seconds_since_epoch() - header.timestamp, + seconds_since_epoch() - statistics.timestamp, )); - APP_METRICS.processed_txs[&stage.into()].inc_by(header.tx_count() as u64); - APP_METRICS.processed_l1_txs[&stage.into()].inc_by(header.tx_count() as u64); + APP_METRICS.processed_txs[&stage.into()] + .inc_by(statistics.l2_tx_count as u64 + statistics.l1_tx_count as u64); + APP_METRICS.processed_l1_txs[&stage.into()].inc_by(statistics.l1_tx_count as u64); } metrics_latency.observe(); } diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index a3bb9951f44a..45835a50c33b 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -9,7 +9,7 @@ use zksync_config::{ }; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_eth_client::clients::MockEthereum; +use zksync_eth_client::{clients::MockEthereum, BaseFees}; use zksync_l1_contract_interface::i_executor::methods::{ExecuteBatches, ProveBatches}; use zksync_node_fee_model::l1_gas_price::GasAdjuster; use zksync_node_test_utils::{create_l1_batch, l1_batch_metadata_to_commitment_artifacts}; @@ -28,8 +28,9 @@ use zksync_types::{ }; use crate::{ - abstract_l1_interface::L1BlockNumbers, aggregated_operations::AggregatedOperation, Aggregator, - EthSenderError, EthTxAggregator, EthTxManager, + abstract_l1_interface::{L1BlockNumbers, OperatorType}, + aggregated_operations::AggregatedOperation, + Aggregator, EthSenderError, EthTxAggregator, EthTxManager, }; // Alias to conveniently call static methods of `ETHSender`. 
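Two notes on the `metrics.rs` changes above, with a hedged sketch (label and struct names below are illustrative, modeled on how `EthSenderMetrics` is declared, not taken verbatim from the PR). First, turning `number_of_inflight_txs` into a labeled `Family` yields one time series per operator instead of a single mixed one. Second, the switch to batch statistics fixes a double count: the old code bumped both `processed_txs` and `processed_l1_txs` by the full `tx_count()`, so a batch with 90 L2 txs and 10 L1 txs now increments them by 100 and 10 respectively, rather than by 100 and 100.

```rust
use vise::{EncodeLabelSet, EncodeLabelValue, Family, Gauge, Metrics};

// Illustrative label type mirroring `OperatorType` from the diff above.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)]
#[metrics(label = "operator", rename_all = "snake_case")]
enum Operator {
    NonBlob,
    Blob,
}

#[derive(Debug, Metrics)]
#[metrics(prefix = "eth_sender_example")]
struct ExampleMetrics {
    /// One gauge per operator label instead of a single mixed series.
    number_of_inflight_txs: Family<Operator, Gauge<usize>>,
}

fn record_inflight(metrics: &ExampleMetrics, operator: Operator, count: usize) {
    metrics.number_of_inflight_txs[&operator].set(count);
}
```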
@@ -130,12 +131,23 @@ impl EthSenderTester { ..eth_sender_config.clone().sender.unwrap() }; + let history: Vec<_> = history + .into_iter() + .map(|base_fee_per_gas| BaseFees { + base_fee_per_gas, + base_fee_per_blob_gas: 0.into(), + }) + .collect(); + let gateway = MockEthereum::builder() .with_fee_history( - std::iter::repeat(0) - .take(Self::WAIT_CONFIRMATIONS as usize) - .chain(history) - .collect(), + std::iter::repeat_with(|| BaseFees { + base_fee_per_gas: 0, + base_fee_per_blob_gas: 0.into(), + }) + .take(Self::WAIT_CONFIRMATIONS as usize) + .chain(history) + .collect(), ) .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { @@ -321,7 +333,7 @@ async fn confirm_many( .storage() .await .eth_sender_dal() - .get_inflight_txs() + .get_inflight_txs(tester.manager.operator_address(OperatorType::NonBlob)) .await .unwrap() .len(), @@ -336,9 +348,10 @@ async fn confirm_many( let to_resend = tester .manager - .monitor_inflight_transactions( + .monitor_inflight_transactions_single_operator( &mut tester.conn.connection().await.unwrap(), tester.get_block_numbers().await, + OperatorType::NonBlob, ) .await?; @@ -348,7 +361,7 @@ async fn confirm_many( .storage() .await .eth_sender_dal() - .get_inflight_txs() + .get_inflight_txs(tester.manager.operator_address(OperatorType::NonBlob)) .await .unwrap() .len(), @@ -422,7 +435,7 @@ async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Re .storage() .await .eth_sender_dal() - .get_inflight_txs() + .get_inflight_txs(tester.manager.operator_address(OperatorType::NonBlob)) .await .unwrap() .len(), @@ -450,7 +463,11 @@ async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Re let (to_resend, _) = tester .manager - .monitor_inflight_transactions(&mut tester.conn.connection().await.unwrap(), block_numbers) + .monitor_inflight_transactions_single_operator( + &mut tester.conn.connection().await.unwrap(), + block_numbers, + OperatorType::NonBlob, + ) .await? .unwrap(); @@ -471,7 +488,7 @@ async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Re .storage() .await .eth_sender_dal() - .get_inflight_txs() + .get_inflight_txs(tester.manager.operator_address(OperatorType::NonBlob)) .await .unwrap() .len(), @@ -557,7 +574,7 @@ async fn dont_resend_already_mined(commitment_mode: L1BatchCommitmentMode) -> an .storage() .await .eth_sender_dal() - .get_inflight_txs() + .get_inflight_txs(tester.manager.operator_address(OperatorType::NonBlob)) .await .unwrap() .len(), @@ -571,9 +588,10 @@ async fn dont_resend_already_mined(commitment_mode: L1BatchCommitmentMode) -> an let to_resend = tester .manager - .monitor_inflight_transactions( + .monitor_inflight_transactions_single_operator( &mut tester.conn.connection().await.unwrap(), tester.get_block_numbers().await, + OperatorType::NonBlob, ) .await?; @@ -583,7 +601,7 @@ async fn dont_resend_already_mined(commitment_mode: L1BatchCommitmentMode) -> an .storage() .await .eth_sender_dal() - .get_inflight_txs() + .get_inflight_txs(tester.manager.operator_address(OperatorType::NonBlob)) .await .unwrap() .len(), @@ -669,9 +687,10 @@ async fn three_scenarios(commitment_mode: L1BatchCommitmentMode) -> anyhow::Resu let (to_resend, _) = tester .manager - .monitor_inflight_transactions( + .monitor_inflight_transactions_single_operator( &mut tester.conn.connection().await.unwrap(), tester.get_block_numbers().await, + OperatorType::NonBlob, ) .await? 
.expect("we should be trying to resend the last tx"); @@ -682,7 +701,7 @@ async fn three_scenarios(commitment_mode: L1BatchCommitmentMode) -> anyhow::Resu .storage() .await .eth_sender_dal() - .get_inflight_txs() + .get_inflight_txs(tester.manager.operator_address(OperatorType::NonBlob)) .await .unwrap() .len(), @@ -756,9 +775,10 @@ async fn failed_eth_tx(commitment_mode: L1BatchCommitmentMode) { .execute_tx(hash, false, EthSenderTester::WAIT_CONFIRMATIONS); tester .manager - .monitor_inflight_transactions( + .monitor_inflight_transactions_single_operator( &mut tester.conn.connection().await.unwrap(), tester.get_block_numbers().await, + OperatorType::NonBlob, ) .await .unwrap(); @@ -1242,9 +1262,20 @@ async fn confirm_tx(tester: &mut EthSenderTester, hash: H256) { .execute_tx(hash, true, EthSenderTester::WAIT_CONFIRMATIONS); tester .manager - .monitor_inflight_transactions( + .monitor_inflight_transactions_single_operator( + &mut tester.conn.connection().await.unwrap(), + tester.get_block_numbers().await, + OperatorType::NonBlob, + ) + .await + .unwrap(); + + tester + .manager + .monitor_inflight_transactions_single_operator( &mut tester.conn.connection().await.unwrap(), tester.get_block_numbers().await, + OperatorType::Blob, ) .await .unwrap(); diff --git a/core/node/eth_watch/Cargo.toml b/core/node/eth_watch/Cargo.toml index 4e85d1332603..bbdc4ba27d34 100644 --- a/core/node/eth_watch/Cargo.toml +++ b/core/node/eth_watch/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_eth_watch" -version = "0.1.0" +description = "ZKsync Ethereum watcher" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 764573002996..39b9b5e9f6b1 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -1,6 +1,7 @@ use std::fmt; -use zksync_contracts::verifier_contract; +use anyhow::Context; +use zksync_contracts::{state_transition_manager_contract, verifier_contract}; use zksync_eth_client::{ clients::{DynClient, L1}, CallFunctionArgs, ClientError, ContractCallError, EnrichedClientError, EnrichedClientResult, @@ -27,6 +28,11 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { /// Returns scheduler verification key hash by verifier address. async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result; + /// Returns upgrade diamond cut by packed protocol version. + async fn diamond_cut_by_version( + &self, + packed_version: H256, + ) -> EnrichedClientResult>>; /// Sets list of topics to return events for. fn set_topics(&mut self, topics: Vec); } @@ -42,8 +48,10 @@ pub struct EthHttpQueryClient { topics: Vec, diamond_proxy_addr: Address, governance_address: Address, + new_upgrade_cut_data_signature: H256, // Only present for post-shared bridge chains. state_transition_manager_address: Option
<Address>,
+ chain_admin_address: Option<Address>
, verifier_contract_abi: Contract, confirmations_for_eth_event: Option, } @@ -53,6 +61,7 @@ impl EthHttpQueryClient { client: Box>, diamond_proxy_addr: Address, state_transition_manager_address: Option
<Address>,
+ chain_admin_address: Option<Address>
, governance_address: Address, confirmations_for_eth_event: Option, ) -> Self { @@ -66,7 +75,13 @@ impl EthHttpQueryClient { topics: Vec::new(), diamond_proxy_addr, state_transition_manager_address, + chain_admin_address, governance_address, + new_upgrade_cut_data_signature: state_transition_manager_contract() + .event("NewUpgradeCutData") + .context("NewUpgradeCutData event is missing in ABI") + .unwrap() + .signature(), verifier_contract_abi: verifier_contract(), confirmations_for_eth_event, } @@ -84,6 +99,7 @@ impl EthHttpQueryClient { Some(self.diamond_proxy_addr), Some(self.governance_address), self.state_transition_manager_address, + self.chain_admin_address, ] .into_iter() .flatten() @@ -110,6 +126,29 @@ impl EthClient for EthHttpQueryClient { .await } + async fn diamond_cut_by_version( + &self, + packed_version: H256, + ) -> EnrichedClientResult>> { + let Some(state_transition_manager_address) = self.state_transition_manager_address else { + return Ok(None); + }; + + let filter = FilterBuilder::default() + .address(vec![state_transition_manager_address]) + .from_block(BlockNumber::Earliest) + .to_block(BlockNumber::Latest) + .topics( + Some(vec![self.new_upgrade_cut_data_signature]), + Some(vec![packed_version]), + None, + None, + ) + .build(); + let logs = self.client.logs(&filter).await?; + Ok(logs.into_iter().next().map(|log| log.data.0)) + } + async fn get_events( &self, from: BlockNumber, diff --git a/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs b/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs new file mode 100644 index 000000000000..dff10662e984 --- /dev/null +++ b/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs @@ -0,0 +1,134 @@ +use anyhow::Context as _; +use zksync_dal::{Connection, Core, CoreDal, DalError}; +use zksync_types::{ + ethabi::Contract, protocol_version::ProtocolSemanticVersion, web3::Log, ProtocolUpgrade, H256, + U256, +}; + +use crate::{ + client::EthClient, + event_processors::{EventProcessor, EventProcessorError}, + metrics::{PollStage, METRICS}, +}; + +/// Listens to scheduling events coming from the chain admin contract and saves new protocol upgrade proposals to the database. +#[derive(Debug)] +pub struct DecentralizedUpgradesEventProcessor { + /// Last protocol version seen. Used to skip events for already known upgrade proposals. + last_seen_protocol_version: ProtocolSemanticVersion, + update_upgrade_timestamp_signature: H256, +} + +impl DecentralizedUpgradesEventProcessor { + pub fn new( + last_seen_protocol_version: ProtocolSemanticVersion, + chain_admin_contract: &Contract, + ) -> Self { + Self { + last_seen_protocol_version, + update_upgrade_timestamp_signature: chain_admin_contract + .event("UpdateUpgradeTimestamp") + .context("UpdateUpgradeTimestamp event is missing in ABI") + .unwrap() + .signature(), + } + } +} + +#[async_trait::async_trait] +impl EventProcessor for DecentralizedUpgradesEventProcessor { + async fn process_events( + &mut self, + storage: &mut Connection<'_, Core>, + client: &dyn EthClient, + events: Vec, + ) -> Result<(), EventProcessorError> { + let mut upgrades = Vec::new(); + for event in events { + let version = event.topics.get(1).copied().context("missing topic 1")?; + let timestamp: u64 = U256::from_big_endian(&event.data.0) + .try_into() + .ok() + .context("upgrade timestamp is too big")?; + + let diamond_cut = client + .diamond_cut_by_version(version) + .await? 
+ .context("missing upgrade data on STM")?; + + let upgrade = ProtocolUpgrade { + timestamp, + ..ProtocolUpgrade::try_from_diamond_cut(&diamond_cut)? + }; + // Scheduler VK is not present in proposal event. It is hard coded in verifier contract. + let scheduler_vk_hash = if let Some(address) = upgrade.verifier_address { + Some(client.scheduler_vk_hash(address).await?) + } else { + None + }; + upgrades.push((upgrade, scheduler_vk_hash)); + } + + let new_upgrades: Vec<_> = upgrades + .into_iter() + .skip_while(|(v, _)| v.version <= self.last_seen_protocol_version) + .collect(); + + let Some((last_upgrade, _)) = new_upgrades.last() else { + return Ok(()); + }; + let versions: Vec<_> = new_upgrades + .iter() + .map(|(u, _)| u.version.to_string()) + .collect(); + tracing::debug!("Received upgrades with versions: {versions:?}"); + + let last_version = last_upgrade.version; + let stage_latency = METRICS.poll_eth_node[&PollStage::PersistUpgrades].start(); + for (upgrade, scheduler_vk_hash) in new_upgrades { + let latest_semantic_version = storage + .protocol_versions_dal() + .latest_semantic_version() + .await + .map_err(DalError::generalize)? + .context("expected some version to be present in DB")?; + + if upgrade.version > latest_semantic_version { + let latest_version = storage + .protocol_versions_dal() + .get_protocol_version_with_latest_patch(latest_semantic_version.minor) + .await + .map_err(DalError::generalize)? + .with_context(|| { + format!( + "expected minor version {} to be present in DB", + latest_semantic_version.minor as u16 + ) + })?; + + let new_version = latest_version.apply_upgrade(upgrade, scheduler_vk_hash); + if new_version.version.minor == latest_semantic_version.minor { + // Only verification parameters may change if only patch is bumped. 
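+ // (i.e. a patch-only upgrade may rotate the verifier and its scheduler VK, but must not
+ // change the base system contracts or carry an upgrade transaction; the assertions
+ // below enforce exactly that)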
+ assert_eq!( + new_version.base_system_contracts_hashes, + latest_version.base_system_contracts_hashes + ); + assert!(new_version.tx.is_none()); + } + storage + .protocol_versions_dal() + .save_protocol_version_with_tx(&new_version) + .await + .map_err(DalError::generalize)?; + } + } + stage_latency.observe(); + + self.last_seen_protocol_version = last_version; + Ok(()) + } + + fn relevant_topic(&self) -> H256 { + self.update_upgrade_timestamp_signature + } +} diff --git a/core/node/eth_watch/src/event_processors/mod.rs b/core/node/eth_watch/src/event_processors/mod.rs index 396bcc2e1ca5..43ae259305a3 100644 --- a/core/node/eth_watch/src/event_processors/mod.rs +++ b/core/node/eth_watch/src/event_processors/mod.rs @@ -5,10 +5,12 @@ use zksync_eth_client::{ContractCallError, EnrichedClientError}; use zksync_types::{web3::Log, H256}; pub(crate) use self::{ + decentralized_upgrades::DecentralizedUpgradesEventProcessor, governance_upgrades::GovernanceUpgradesEventProcessor, priority_ops::PriorityOpsEventProcessor, }; use crate::client::EthClient; +mod decentralized_upgrades; mod governance_upgrades; mod priority_ops; diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs index 7c27a6322c2f..72b6b29a2533 100644 --- a/core/node/eth_watch/src/lib.rs +++ b/core/node/eth_watch/src/lib.rs @@ -22,6 +22,7 @@ use self::{ }, metrics::{PollStage, METRICS}, }; +use crate::event_processors::DecentralizedUpgradesEventProcessor; mod client; mod event_processors; @@ -50,6 +51,7 @@ impl EthWatch { pub async fn new( diamond_proxy_addr: Address, governance_contract: &Contract, + chain_admin_contract: &Contract, mut client: Box, pool: ConnectionPool, poll_interval: Duration, @@ -66,9 +68,14 @@ impl EthWatch { state.last_seen_protocol_version, governance_contract, ); + let decentralized_upgrades_processor = DecentralizedUpgradesEventProcessor::new( + state.last_seen_protocol_version, + chain_admin_contract, + ); let event_processors: Vec> = vec![ Box::new(priority_ops_processor), Box::new(governance_upgrades_processor), + Box::new(decentralized_upgrades_processor), ]; let topics = event_processors diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index 6b15c71bd140..7ae3b5494e98 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -1,7 +1,7 @@ use std::{collections::HashMap, convert::TryInto, sync::Arc}; use tokio::sync::RwLock; -use zksync_contracts::{governance_contract, hyperchain_contract}; +use zksync_contracts::{chain_admin_contract, governance_contract, hyperchain_contract}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_eth_client::{ContractCallError, EnrichedClientResult}; use zksync_types::{ @@ -135,6 +135,13 @@ impl EthClient for MockEthClient { async fn finalized_block_number(&self) -> EnrichedClientResult { Ok(self.inner.read().await.last_finalized_block_number) } + + async fn diamond_cut_by_version( + &self, + _packed_version: H256, + ) -> EnrichedClientResult>> { + unimplemented!() + } } fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { @@ -201,6 +208,7 @@ async fn create_test_watcher(connection_pool: ConnectionPool) -> (EthWatch let watcher = EthWatch::new( Address::default(), &governance_contract(), + &chain_admin_contract(), Box::new(client.clone()), connection_pool, std::time::Duration::from_nanos(1), @@ -293,6 +301,7 @@ async fn test_normal_operation_governance_upgrades() { let mut watcher = EthWatch::new( Address::default(), &governance_contract(), + &chain_admin_contract(), 
Box::new(client.clone()), connection_pool.clone(), std::time::Duration::from_nanos(1), @@ -505,6 +514,7 @@ fn tx_into_log(tx: L1Tx) -> Log { transaction_log_index: Some(0u64.into()), log_type: None, removed: None, + block_timestamp: None, } } @@ -549,6 +559,7 @@ fn upgrade_into_governor_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { transaction_log_index: Some(0u64.into()), log_type: None, removed: None, + block_timestamp: None, } } diff --git a/core/node/fee_model/Cargo.toml b/core/node/fee_model/Cargo.toml index 7ac3c1d32e88..643e87b9c27e 100644 --- a/core/node/fee_model/Cargo.toml +++ b/core/node/fee_model/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "zksync_node_fee_model" +description = "ZKsync fee model" version.workspace = true edition.workspace = true authors.workspace = true @@ -17,6 +18,8 @@ zksync_config.workspace = true zksync_eth_client.workspace = true zksync_utils.workspace = true zksync_web3_decl.workspace = true +zksync_base_token_adjuster.workspace = true +bigdecimal.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 9e553ba47bf2..2032cb9c89fd 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -2,14 +2,13 @@ use std::{ collections::VecDeque, - ops::RangeInclusive, sync::{Arc, RwLock}, }; use tokio::sync::watch; use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; use zksync_eth_client::EthInterface; -use zksync_types::{commitment::L1BatchCommitmentMode, L1_GAS_PER_PUBDATA_BYTE, U256, U64}; +use zksync_types::{commitment::L1BatchCommitmentMode, L1_GAS_PER_PUBDATA_BYTE, U256}; use zksync_web3_decl::client::{DynClient, L1}; use self::metrics::METRICS; @@ -52,26 +51,25 @@ impl GasAdjuster { .await? .as_usize() .saturating_sub(1); - let base_fee_history = eth_client + let fee_history = eth_client .base_fee_history(current_block, config.max_base_fee_samples) .await?; - // Web3 API doesn't provide a method to fetch blob fees for multiple blocks using single request, - // so we request blob base fee only for the latest block. 
- let (_, last_block_blob_base_fee) = - Self::get_base_fees_history(eth_client.as_ref(), current_block..=current_block).await?; + let base_fee_statistics = GasStatistics::new( + config.max_base_fee_samples, + current_block, + fee_history.iter().map(|fee| fee.base_fee_per_gas), + ); + + let blob_base_fee_statistics = GasStatistics::new( + config.num_samples_for_blob_base_fee_estimate, + current_block, + fee_history.iter().map(|fee| fee.base_fee_per_blob_gas), + ); Ok(Self { - base_fee_statistics: GasStatistics::new( - config.max_base_fee_samples, - current_block, - &base_fee_history, - ), - blob_base_fee_statistics: GasStatistics::new( - config.num_samples_for_blob_base_fee_estimate, - current_block, - &last_block_blob_base_fee, - ), + base_fee_statistics, + blob_base_fee_statistics, config, pubdata_sending_mode, eth_client, @@ -95,25 +93,29 @@ impl GasAdjuster { let last_processed_block = self.base_fee_statistics.last_processed_block(); if current_block > last_processed_block { - let (base_fee_history, blob_base_fee_history) = Self::get_base_fees_history( - self.eth_client.as_ref(), - (last_processed_block + 1)..=current_block, - ) - .await?; + let n_blocks = current_block - last_processed_block; + let base_fees = self + .eth_client + .base_fee_history(current_block, n_blocks) + .await?; // We shouldn't rely on L1 provider to return consistent results, so we check that we have at least one new sample. - if let Some(current_base_fee_per_gas) = base_fee_history.last() { + if let Some(current_base_fee_per_gas) = base_fees.last().map(|fee| fee.base_fee_per_gas) + { METRICS .current_base_fee_per_gas - .set(*current_base_fee_per_gas); + .set(current_base_fee_per_gas); } - self.base_fee_statistics.add_samples(&base_fee_history); + self.base_fee_statistics + .add_samples(base_fees.iter().map(|fee| fee.base_fee_per_gas)); - if let Some(current_blob_base_fee) = blob_base_fee_history.last() { + if let Some(current_blob_base_fee) = + base_fees.last().map(|fee| fee.base_fee_per_blob_gas) + { // Blob base fee overflows `u64` only in very extreme cases. // It doesn't worth to observe exact value with metric because anyway values that can be used // are capped by `self.config.max_blob_base_fee()` of `u64` type. - if current_blob_base_fee > &U256::from(u64::MAX) { + if current_blob_base_fee > U256::from(u64::MAX) { tracing::error!("Failed to report current_blob_base_fee = {current_blob_base_fee}, it exceeds u64::MAX"); } else { METRICS @@ -122,7 +124,7 @@ impl GasAdjuster { } } self.blob_base_fee_statistics - .add_samples(&blob_base_fee_history); + .add_samples(base_fees.iter().map(|fee| fee.base_fee_per_blob_gas)); } Ok(()) } @@ -200,6 +202,11 @@ impl GasAdjuster { PubdataSendingMode::Calldata => { self.estimate_effective_gas_price() * self.pubdata_byte_gas() } + PubdataSendingMode::Custom => { + // Fix this when we have a better understanding of dynamic pricing for custom DA layers. + // GitHub issue: https://github.com/matter-labs/zksync-era/issues/2105 + 0 + } } } @@ -223,62 +230,6 @@ impl GasAdjuster { } } } - - /// Returns vector of base fees and blob base fees for given block range. - /// Note, that data for pre-dencun blocks won't be included in the vector returned. 
- async fn get_base_fees_history(
- eth_client: &DynClient<L1>,
- block_range: RangeInclusive<usize>,
- ) -> anyhow::Result<(Vec<u64>, Vec<U256>)> {
- let mut base_fee_history = Vec::new();
- let mut blob_base_fee_history = Vec::new();
- for block_number in block_range {
- let header = eth_client.block(U64::from(block_number).into()).await?;
- if let Some(base_fee_per_gas) =
- header.as_ref().and_then(|header| header.base_fee_per_gas)
- {
- base_fee_history.push(base_fee_per_gas.as_u64())
- }
-
- if let Some(excess_blob_gas) = header.as_ref().and_then(|header| header.excess_blob_gas)
- {
- blob_base_fee_history.push(Self::blob_base_fee(excess_blob_gas.as_u64()))
- }
- }
-
- Ok((base_fee_history, blob_base_fee_history))
- }
-
- /// Calculates `blob_base_fee` given `excess_blob_gas`.
- fn blob_base_fee(excess_blob_gas: u64) -> U256 {
- // Constants and formula are taken from EIP4844 specification.
- const MIN_BLOB_BASE_FEE: u32 = 1;
- const BLOB_BASE_FEE_UPDATE_FRACTION: u32 = 3338477;
-
- Self::fake_exponential(
- MIN_BLOB_BASE_FEE.into(),
- excess_blob_gas.into(),
- BLOB_BASE_FEE_UPDATE_FRACTION.into(),
- )
- }
-
- /// approximates `factor * e ** (numerator / denominator)` using Taylor expansion.
- fn fake_exponential(factor: U256, numerator: U256, denominator: U256) -> U256 {
- let mut i = 1_u32;
- let mut output = U256::zero();
- let mut accum = factor * denominator;
- while !accum.is_zero() {
- output += accum;
-
- accum *= numerator;
- accum /= denominator;
- accum /= U256::from(i);
-
- i += 1;
- }
-
- output / denominator
- }
}
impl L1TxParamsProvider for GasAdjuster {
@@ -334,6 +285,22 @@ impl L1TxParamsProvider for GasAdjuster {
fn get_priority_fee(&self) -> u64 {
self.config.default_priority_fee_per_gas
}
+
+ // The idea is that when we finally decide to send a blob tx, we want to offer gas fees high
+ // enough to be "almost certain" that the transaction gets included, so that we never have to
+ // bump the gas price later, at which point we would have little control over how much we pay
+ // in the end. This strategy works because no matter whether we offer double or triple the
+ // current price, we still pay the same block base fee.
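+ // Worked example (hypothetical numbers): if the last observed base fee is 30 gwei, we
+ // offer max_fee_per_gas = 60 gwei; whether the base fee then stays at 30 gwei or climbs
+ // to 45 gwei, the tx remains includable without repricing, and we still pay only the
+ // prevailing base fee, so the doubled cap costs nothing extra by itself.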
+ fn get_blob_tx_base_fee(&self) -> u64 { + self.base_fee_statistics.last_added_value() * 2 + } + + fn get_blob_tx_blob_base_fee(&self) -> u64 { + self.blob_base_fee_statistics.last_added_value().as_u64() * 2 + } + + fn get_blob_tx_priority_fee(&self) -> u64 { + self.get_priority_fee() * 2 + } } /// Helper structure responsible for collecting the data about recent transactions, @@ -347,7 +314,7 @@ pub(super) struct GasStatisticsInner { } impl GasStatisticsInner { - fn new(max_samples: usize, block: usize, fee_history: &[T]) -> Self { + fn new(max_samples: usize, block: usize, fee_history: impl IntoIterator) -> Self { let mut statistics = Self { max_samples, samples: VecDeque::with_capacity(max_samples), @@ -371,9 +338,11 @@ impl GasStatisticsInner { self.samples.back().copied().unwrap_or(self.median_cached) } - fn add_samples(&mut self, fees: &[T]) { + fn add_samples(&mut self, fees: impl IntoIterator) { + let old_len = self.samples.len(); self.samples.extend(fees); - self.last_processed_block += fees.len(); + let processed_blocks = self.samples.len() - old_len; + self.last_processed_block += processed_blocks; let extra = self.samples.len().saturating_sub(self.max_samples); self.samples.drain(..extra); @@ -391,7 +360,7 @@ impl GasStatisticsInner { pub(super) struct GasStatistics(RwLock>); impl GasStatistics { - pub fn new(max_samples: usize, block: usize, fee_history: &[T]) -> Self { + pub fn new(max_samples: usize, block: usize, fee_history: impl IntoIterator) -> Self { Self(RwLock::new(GasStatisticsInner::new( max_samples, block, @@ -407,7 +376,7 @@ impl GasStatistics { self.0.read().unwrap().last_added_value() } - pub fn add_samples(&self, fees: &[T]) { + pub fn add_samples(&self, fees: impl IntoIterator) { self.0.write().unwrap().add_samples(fees) } diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs index 594efc6915e2..200903b6deda 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs @@ -1,29 +1,29 @@ -use std::collections::VecDeque; +use std::{collections::VecDeque, sync::RwLockReadGuard}; use test_casing::test_casing; use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; -use zksync_eth_client::clients::MockEthereum; +use zksync_eth_client::{clients::MockEthereum, BaseFees}; use zksync_types::commitment::L1BatchCommitmentMode; -use super::{GasAdjuster, GasStatisticsInner}; +use super::{GasAdjuster, GasStatistics, GasStatisticsInner}; /// Check that we compute the median correctly #[test] fn median() { // sorted: 4 4 6 7 8 - assert_eq!(GasStatisticsInner::new(5, 5, &[6, 4, 7, 8, 4]).median(), 6); + assert_eq!(GasStatisticsInner::new(5, 5, [6, 4, 7, 8, 4]).median(), 6); // sorted: 4 4 8 10 - assert_eq!(GasStatisticsInner::new(4, 4, &[8, 4, 4, 10]).median(), 8); + assert_eq!(GasStatisticsInner::new(4, 4, [8, 4, 4, 10]).median(), 8); } /// Check that we properly manage the block base fee queue #[test] fn samples_queue() { - let mut stats = GasStatisticsInner::new(5, 5, &[6, 4, 7, 8, 4, 5]); + let mut stats = GasStatisticsInner::new(5, 5, [6, 4, 7, 8, 4, 5]); assert_eq!(stats.samples, VecDeque::from([4, 7, 8, 4, 5])); - stats.add_samples(&[18, 18, 18]); + stats.add_samples([18, 18, 18]); assert_eq!(stats.samples, VecDeque::from([4, 5, 18, 18, 18])); } @@ -32,38 +32,54 @@ fn samples_queue() { #[test_casing(2, [L1BatchCommitmentMode::Rollup, L1BatchCommitmentMode::Validium])] #[tokio::test] async fn 
kept_updated(commitment_mode: L1BatchCommitmentMode) { - let eth_client = MockEthereum::builder() - .with_fee_history(vec![0, 4, 6, 8, 7, 5, 5, 8, 10, 9]) - .with_excess_blob_gas_history(vec![ - 393216, - 393216 * 2, - 393216, - 393216 * 2, - 393216, - 393216 * 2, - 393216 * 3, - 393216 * 4, - ]) - .build(); + // Helper function to read a value from adjuster + fn read(statistics: &GasStatistics) -> RwLockReadGuard> { + statistics.0.read().unwrap() + } + + let block_fees = vec![0, 4, 6, 8, 7, 5, 5, 8, 10, 9]; + let blob_fees = vec![ + 0, + 393216, + 393216, + 393216 * 2, + 393216, + 393216 * 2, + 393216 * 2, + 393216 * 3, + 393216 * 4, + 393216, + ]; + let base_fees = block_fees + .into_iter() + .zip(blob_fees) + .map(|(block, blob)| BaseFees { + base_fee_per_gas: block, + base_fee_per_blob_gas: blob.into(), + }) + .collect(); + + let eth_client = MockEthereum::builder().with_fee_history(base_fees).build(); // 5 sampled blocks + additional block to account for latest block subtraction eth_client.advance_block_number(6); + let config = GasAdjusterConfig { + default_priority_fee_per_gas: 5, + max_base_fee_samples: 5, + pricing_formula_parameter_a: 1.5, + pricing_formula_parameter_b: 1.0005, + internal_l1_pricing_multiplier: 0.8, + internal_enforced_l1_gas_price: None, + internal_enforced_pubdata_price: None, + poll_period: 5, + max_l1_gas_price: None, + num_samples_for_blob_base_fee_estimate: 3, + internal_pubdata_pricing_multiplier: 1.0, + max_blob_base_fee: None, + }; let adjuster = GasAdjuster::new( Box::new(eth_client.clone().into_client()), - GasAdjusterConfig { - default_priority_fee_per_gas: 5, - max_base_fee_samples: 5, - pricing_formula_parameter_a: 1.5, - pricing_formula_parameter_b: 1.0005, - internal_l1_pricing_multiplier: 0.8, - internal_enforced_l1_gas_price: None, - internal_enforced_pubdata_price: None, - poll_period: 5, - max_l1_gas_price: None, - num_samples_for_blob_base_fee_estimate: 3, - internal_pubdata_pricing_multiplier: 1.0, - max_blob_base_fee: None, - }, + config, PubdataSendingMode::Calldata, commitment_mode, ) @@ -71,58 +87,35 @@ async fn kept_updated(commitment_mode: L1BatchCommitmentMode) { .unwrap(); assert_eq!( - adjuster.base_fee_statistics.0.read().unwrap().samples.len(), - 5 + read(&adjuster.base_fee_statistics).samples.len(), + config.max_base_fee_samples ); - assert_eq!(adjuster.base_fee_statistics.0.read().unwrap().median(), 6); + assert_eq!(read(&adjuster.base_fee_statistics).median(), 6); - let expected_median_blob_base_fee = GasAdjuster::blob_base_fee(393216); + eprintln!("{:?}", read(&adjuster.blob_base_fee_statistics).samples); + let expected_median_blob_base_fee = 393216 * 2; assert_eq!( - adjuster - .blob_base_fee_statistics - .0 - .read() - .unwrap() - .samples - .len(), - 1 + read(&adjuster.blob_base_fee_statistics).samples.len(), + config.num_samples_for_blob_base_fee_estimate ); assert_eq!( - adjuster.blob_base_fee_statistics.0.read().unwrap().median(), - expected_median_blob_base_fee + read(&adjuster.blob_base_fee_statistics).median(), + expected_median_blob_base_fee.into() ); eth_client.advance_block_number(3); adjuster.keep_updated().await.unwrap(); assert_eq!( - adjuster.base_fee_statistics.0.read().unwrap().samples.len(), - 5 + read(&adjuster.base_fee_statistics).samples.len(), + config.max_base_fee_samples ); - assert_eq!(adjuster.base_fee_statistics.0.read().unwrap().median(), 7); + assert_eq!(read(&adjuster.base_fee_statistics).median(), 7); - let expected_median_blob_base_fee = GasAdjuster::blob_base_fee(393216 * 3); + let 
expected_median_blob_base_fee = 393216 * 3; + assert_eq!(read(&adjuster.blob_base_fee_statistics).samples.len(), 3); assert_eq!( - adjuster - .blob_base_fee_statistics - .0 - .read() - .unwrap() - .samples - .len(), - 3 + read(&adjuster.blob_base_fee_statistics).median(), + expected_median_blob_base_fee.into() ); - assert_eq!( - adjuster.blob_base_fee_statistics.0.read().unwrap().median(), - expected_median_blob_base_fee - ); -} - -#[test] -fn blob_base_fee_formula() { - const EXCESS_BLOB_GAS: u64 = 0x4b80000; - const EXPECTED_BLOB_BASE_FEE: u64 = 19893400088; - - let blob_base_fee = GasAdjuster::blob_base_fee(EXCESS_BLOB_GAS); - assert_eq!(blob_base_fee.as_u64(), EXPECTED_BLOB_BASE_FEE); } diff --git a/core/node/fee_model/src/l1_gas_price/mod.rs b/core/node/fee_model/src/l1_gas_price/mod.rs index 219dc2f9c38d..0dab2d921c40 100644 --- a/core/node/fee_model/src/l1_gas_price/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/mod.rs @@ -27,4 +27,13 @@ pub trait L1TxParamsProvider: fmt::Debug + 'static + Send + Sync { /// Returns a lower bound for the `base_fee` value for the next L1 block. fn get_next_block_minimal_base_fee(&self) -> u64; + + /// Returns the recommended `max_fee_per_gas` value (EIP1559) for blob transaction. + fn get_blob_tx_base_fee(&self) -> u64; + + /// Returns the recommended `max_blob_fee_per_gas` value (EIP4844) for blob transaction. + fn get_blob_tx_blob_base_fee(&self) -> u64; + + /// Returns the recommended `max_priority_fee_per_gas` value (EIP1559) for blob transaction. + fn get_blob_tx_priority_fee(&self) -> u64; } diff --git a/core/node/fee_model/src/lib.rs b/core/node/fee_model/src/lib.rs index 793b5d4f8441..00d804de6c81 100644 --- a/core/node/fee_model/src/lib.rs +++ b/core/node/fee_model/src/lib.rs @@ -1,6 +1,8 @@ use std::{fmt, sync::Arc}; use anyhow::Context as _; +use async_trait::async_trait; +use zksync_base_token_adjuster::BaseTokenRatioProvider; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_types::{ fee_model::{ @@ -16,7 +18,7 @@ use crate::l1_gas_price::GasAdjuster; pub mod l1_gas_price; /// Trait responsible for providing fee info for a batch -#[async_trait::async_trait] +#[async_trait] pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { /// Returns the batch fee with scaling applied. This may be used to account for the fact that the L1 gas and pubdata prices may fluctuate, esp. /// in API methods that should return values that are valid for some period of time after the estimation was done. @@ -42,7 +44,7 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { }) } - /// Returns the fee model parameters. + /// Returns the fee model parameters using the denomination of the base token used (WEI for ETH). fn get_fee_model_params(&self) -> FeeParams; } @@ -53,15 +55,17 @@ impl dyn BatchFeeModelInputProvider { } } -/// The struct that represents the batch fee input provider to be used in the main node of the server, i.e. -/// it explicitly gets the L1 gas price from the provider and uses it to calculate the batch fee input instead of getting -/// it from other node. +/// The struct that represents the batch fee input provider to be used in the main node of the server. +/// This struct gets the L1 gas price directly from the provider rather than from another node, as is the +/// case with the external node. 
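+/// For chains whose base token is not ETH, the ETH-denominated fee components are additionally
+/// scaled by the configured base token conversion ratio (numerator / denominator) before being
+/// returned; e.g. with a 2/1 ratio, an L1 gas price of 2_000 wei is reported as 4_000 in
+/// base-token units (see the `test_get_fee_model_params` cases below).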
#[derive(Debug)] pub struct MainNodeFeeInputProvider { provider: Arc, + base_token_ratio_provider: Arc, config: FeeModelConfig, } +#[async_trait] impl BatchFeeModelInputProvider for MainNodeFeeInputProvider { fn get_fee_model_params(&self) -> FeeParams { match self.config { @@ -69,18 +73,27 @@ impl BatchFeeModelInputProvider for MainNodeFeeInputProvider { config, l1_gas_price: self.provider.estimate_effective_gas_price(), }), - FeeModelConfig::V2(config) => FeeParams::V2(FeeParamsV2 { + FeeModelConfig::V2(config) => FeeParams::V2(FeeParamsV2::new( config, - l1_gas_price: self.provider.estimate_effective_gas_price(), - l1_pubdata_price: self.provider.estimate_effective_pubdata_price(), - }), + self.provider.estimate_effective_gas_price(), + self.provider.estimate_effective_pubdata_price(), + self.base_token_ratio_provider.get_conversion_ratio(), + )), } } } impl MainNodeFeeInputProvider { - pub fn new(provider: Arc, config: FeeModelConfig) -> Self { - Self { provider, config } + pub fn new( + provider: Arc, + base_token_ratio_provider: Arc, + config: FeeModelConfig, + ) -> Self { + Self { + provider, + base_token_ratio_provider, + config, + } } } @@ -104,7 +117,7 @@ impl ApiFeeInputProvider { } } -#[async_trait::async_trait] +#[async_trait] impl BatchFeeModelInputProvider for ApiFeeInputProvider { async fn get_batch_fee_input_scaled( &self, @@ -156,11 +169,9 @@ fn compute_batch_fee_model_input_v2( l1_gas_price_scale_factor: f64, l1_pubdata_price_scale_factor: f64, ) -> PubdataIndependentBatchFeeModelInput { - let FeeParamsV2 { - config, - l1_gas_price, - l1_pubdata_price, - } = params; + let config = params.config(); + let l1_gas_price = params.l1_gas_price(); + let l1_pubdata_price = params.l1_pubdata_price(); let FeeModelConfigV2 { minimal_l2_gas_price, @@ -227,6 +238,7 @@ impl Default for MockBatchFeeParamsProvider { } } +#[async_trait] impl BatchFeeModelInputProvider for MockBatchFeeParamsProvider { fn get_fee_model_params(&self) -> FeeParams { self.0 @@ -235,6 +247,13 @@ impl BatchFeeModelInputProvider for MockBatchFeeParamsProvider { #[cfg(test)] mod tests { + use std::num::NonZeroU64; + + use zksync_base_token_adjuster::NoOpRatioProvider; + use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; + use zksync_eth_client::{clients::MockEthereum, BaseFees}; + use zksync_types::{commitment::L1BatchCommitmentMode, fee_model::BaseTokenConversionRatio}; + use super::*; // To test that overflow never happens, we'll use giant L1 gas price, i.e. 
@@ -261,11 +280,12 @@ mod tests {
max_pubdata_per_batch: 100_000,
};
- let params = FeeParamsV2 {
+ let params = FeeParamsV2::new(
config,
- l1_gas_price: GIANT_L1_GAS_PRICE,
- l1_pubdata_price: GIANT_L1_GAS_PRICE,
- };
+ GIANT_L1_GAS_PRICE,
+ GIANT_L1_GAS_PRICE,
+ BaseTokenConversionRatio::default(),
+ );
// We'll use scale factor of 3.0
let input = compute_batch_fee_model_input_v2(params, 3.0, 3.0);
@@ -287,11 +307,12 @@
max_pubdata_per_batch: 100_000,
};
- let params = FeeParamsV2 {
+ let params = FeeParamsV2::new(
config,
- l1_gas_price: SMALL_L1_GAS_PRICE,
- l1_pubdata_price: SMALL_L1_GAS_PRICE,
- };
+ SMALL_L1_GAS_PRICE,
+ SMALL_L1_GAS_PRICE,
+ BaseTokenConversionRatio::default(),
+ );
let input = compute_batch_fee_model_input_v2(params, 1.0, 1.0);
@@ -312,11 +333,12 @@
max_pubdata_per_batch: 100_000,
};
- let params = FeeParamsV2 {
+ let params = FeeParamsV2::new(
config,
- l1_gas_price: GIANT_L1_GAS_PRICE,
- l1_pubdata_price: GIANT_L1_GAS_PRICE,
- };
+ GIANT_L1_GAS_PRICE,
+ GIANT_L1_GAS_PRICE,
+ BaseTokenConversionRatio::default(),
+ );
let input = compute_batch_fee_model_input_v2(params, 1.0, 1.0);
assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE);
@@ -327,7 +349,7 @@
}
#[test]
fn test_compute_batch_fee_model_input_v2_only_compute_overhead() {
// Here we use sensible config, but when only compute is used to close the batch
let config = FeeModelConfigV2 {
minimal_l2_gas_price: 100_000_000_000,
@@ -338,11 +360,12 @@
max_pubdata_per_batch: 100_000,
};
- let params = FeeParamsV2 {
+ let params = FeeParamsV2::new(
config,
- l1_gas_price: GIANT_L1_GAS_PRICE,
- l1_pubdata_price: GIANT_L1_GAS_PRICE,
- };
+ GIANT_L1_GAS_PRICE,
+ GIANT_L1_GAS_PRICE,
+ BaseTokenConversionRatio::default(),
+ );
let input = compute_batch_fee_model_input_v2(params, 1.0, 1.0);
assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE);
@@ -364,19 +387,22 @@
max_pubdata_per_batch: 100_000,
};
- let base_params = FeeParamsV2 {
- config: base_config,
- l1_gas_price: 1_000_000_000,
- l1_pubdata_price: 1_000_000_000,
- };
+ let base_params = FeeParamsV2::new(
+ base_config,
+ 1_000_000_000,
+ 1_000_000_000,
+ BaseTokenConversionRatio::default(),
+ );
let base_input = compute_batch_fee_model_input_v2(base_params, 1.0, 1.0);
let base_input_larger_l1_gas_price = compute_batch_fee_model_input_v2(
- FeeParamsV2 {
- l1_gas_price: base_params.l1_gas_price * 2,
- ..base_params
- },
+ FeeParamsV2::new(
+ base_config,
+ 2_000_000_000, // double the L1 gas price
+ 1_000_000_000,
+ BaseTokenConversionRatio::default(),
+ ),
1.0,
1.0,
);
@@ -396,10 +422,12 @@
);
let base_input_larger_pubdata_price = compute_batch_fee_model_input_v2(
- FeeParamsV2 {
- l1_pubdata_price: base_params.l1_pubdata_price * 2,
- ..base_params
- },
+ FeeParamsV2::new(
+ base_config,
+ 1_000_000_000,
+ 2_000_000_000, // double the L1 pubdata price
+ BaseTokenConversionRatio::default(),
+ ),
1.0,
1.0,
);
@@ -419,13 +447,15 @@
);
let base_input_larger_max_gas = compute_batch_fee_model_input_v2(
- FeeParamsV2 {
- config: FeeModelConfigV2 {
+ FeeParamsV2::new(
+ FeeModelConfigV2 {
max_gas_per_batch: base_config.max_gas_per_batch * 2,
..base_config
},
- ..base_params
- },
+ base_params.l1_gas_price(),
+ base_params.l1_pubdata_price(),
+ BaseTokenConversionRatio::default(),
+ ),
1.0,
1.0,
);
@@ -439,13 +469,15 @@
);
let base_input_larger_max_pubdata = compute_batch_fee_model_input_v2(
- FeeParamsV2 {
-
config: FeeModelConfigV2 {
+ FeeParamsV2::new(
+ FeeModelConfigV2 {
max_pubdata_per_batch: base_config.max_pubdata_per_batch * 2,
..base_config
},
- ..base_params
- },
+ base_params.l1_gas_price(),
+ base_params.l1_pubdata_price(),
+ BaseTokenConversionRatio::default(),
+ ),
1.0,
1.0,
);
@@ -458,4 +490,194 @@
"Max pubdata increase lowers pubdata price"
);
}
+
+ #[tokio::test]
+ async fn test_get_fee_model_params() {
+ struct TestCase {
+ name: &'static str,
+ conversion_ratio: BaseTokenConversionRatio,
+ input_minimal_l2_gas_price: u64, // Wei denomination
+ input_l1_gas_price: u64, // Wei
+ input_l1_pubdata_price: u64, // Wei
+ expected_minimal_l2_gas_price: u64, // BaseToken denomination
+ expected_l1_gas_price: u64, // BaseToken
+ expected_l1_pubdata_price: u64, // BaseToken
+ }
+ let test_cases = vec![
+ TestCase {
+ name: "1 ETH = 2 BaseToken",
+ conversion_ratio: BaseTokenConversionRatio {
+ numerator: NonZeroU64::new(2).unwrap(),
+ denominator: NonZeroU64::new(1).unwrap(),
+ },
+ input_minimal_l2_gas_price: 1000,
+ input_l1_gas_price: 2000,
+ input_l1_pubdata_price: 3000,
+ expected_minimal_l2_gas_price: 2000,
+ expected_l1_gas_price: 4000,
+ expected_l1_pubdata_price: 6000,
+ },
+ TestCase {
+ name: "1 ETH = 0.5 BaseToken",
+ conversion_ratio: BaseTokenConversionRatio {
+ numerator: NonZeroU64::new(1).unwrap(),
+ denominator: NonZeroU64::new(2).unwrap(),
+ },
+ input_minimal_l2_gas_price: 1000,
+ input_l1_gas_price: 2000,
+ input_l1_pubdata_price: 3000,
+ expected_minimal_l2_gas_price: 500,
+ expected_l1_gas_price: 1000,
+ expected_l1_pubdata_price: 1500,
+ },
+ TestCase {
+ name: "1 ETH = 1 BaseToken",
+ conversion_ratio: BaseTokenConversionRatio {
+ numerator: NonZeroU64::new(1).unwrap(),
+ denominator: NonZeroU64::new(1).unwrap(),
+ },
+ input_minimal_l2_gas_price: 1000,
+ input_l1_gas_price: 2000,
+ input_l1_pubdata_price: 3000,
+ expected_minimal_l2_gas_price: 1000,
+ expected_l1_gas_price: 2000,
+ expected_l1_pubdata_price: 3000,
+ },
+ TestCase {
+ name: "Large conversion - 1 ETH = 1_000_000 BaseToken",
+ conversion_ratio: BaseTokenConversionRatio {
+ numerator: NonZeroU64::new(1_000_000).unwrap(),
+ denominator: NonZeroU64::new(1).unwrap(),
+ },
+ input_minimal_l2_gas_price: 1_000_000,
+ input_l1_gas_price: 2_000_000,
+ input_l1_pubdata_price: 3_000_000,
+ expected_minimal_l2_gas_price: 1_000_000_000_000,
+ expected_l1_gas_price: 2_000_000_000_000,
+ expected_l1_pubdata_price: 3_000_000_000_000,
+ },
+ TestCase {
+ name: "Small conversion - 1 ETH = 0.001 BaseToken",
+ conversion_ratio: BaseTokenConversionRatio {
+ numerator: NonZeroU64::new(1).unwrap(),
+ denominator: NonZeroU64::new(1_000).unwrap(),
+ },
+ input_minimal_l2_gas_price: 1_000_000,
+ input_l1_gas_price: 2_000_000,
+ input_l1_pubdata_price: 3_000_000,
+ expected_minimal_l2_gas_price: 1_000,
+ expected_l1_gas_price: 2_000,
+ expected_l1_pubdata_price: 3_000,
+ },
+ TestCase {
+ name: "Fractional conversion ratio 1.123456789",
+ conversion_ratio: BaseTokenConversionRatio {
+ numerator: NonZeroU64::new(1123456789).unwrap(),
+ denominator: NonZeroU64::new(1_000_000_000).unwrap(),
+ },
+ input_minimal_l2_gas_price: 1_000_000,
+ input_l1_gas_price: 2_000_000,
+ input_l1_pubdata_price: 3_000_000,
+ expected_minimal_l2_gas_price: 1123456,
+ expected_l1_gas_price: 2246913,
+ expected_l1_pubdata_price: 3370370,
+ },
+ TestCase {
+ name: "Conversion ratio too large so clamp down to u64::MAX",
+ conversion_ratio: BaseTokenConversionRatio {
+ numerator: NonZeroU64::new(u64::MAX).unwrap(),
+ denominator:
NonZeroU64::new(1).unwrap(), + }, + input_minimal_l2_gas_price: 2, + input_l1_gas_price: 2, + input_l1_pubdata_price: 2, + expected_minimal_l2_gas_price: u64::MAX, + expected_l1_gas_price: u64::MAX, + expected_l1_pubdata_price: u64::MAX, + }, + ]; + + for case in test_cases { + let gas_adjuster = + setup_gas_adjuster(case.input_l1_gas_price, case.input_l1_pubdata_price).await; + + let base_token_ratio_provider = NoOpRatioProvider::new(case.conversion_ratio); + + let config = FeeModelConfig::V2(FeeModelConfigV2 { + minimal_l2_gas_price: case.input_minimal_l2_gas_price, + compute_overhead_part: 1.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 1, + max_gas_per_batch: 1, + max_pubdata_per_batch: 1, + }); + + let fee_provider = MainNodeFeeInputProvider::new( + Arc::new(gas_adjuster), + Arc::new(base_token_ratio_provider), + config, + ); + + let fee_params = fee_provider.get_fee_model_params(); + + if let FeeParams::V2(params) = fee_params { + assert_eq!( + params.l1_gas_price(), + case.expected_l1_gas_price, + "Test case '{}' failed: l1_gas_price mismatch", + case.name + ); + assert_eq!( + params.l1_pubdata_price(), + case.expected_l1_pubdata_price, + "Test case '{}' failed: l1_pubdata_price mismatch", + case.name + ); + assert_eq!( + params.config().minimal_l2_gas_price, + case.expected_minimal_l2_gas_price, + "Test case '{}' failed: minimal_l2_gas_price mismatch", + case.name + ); + } else { + panic!("Expected FeeParams::V2 for test case '{}'", case.name); + } + } + } + + // Helper function to create BaseFees. + fn base_fees(block: u64, blob: U256) -> BaseFees { + BaseFees { + base_fee_per_gas: block, + base_fee_per_blob_gas: blob, + } + } + + // Helper function to setup the GasAdjuster. + async fn setup_gas_adjuster(l1_gas_price: u64, l1_pubdata_price: u64) -> GasAdjuster { + let mock = MockEthereum::builder() + .with_fee_history(vec![ + base_fees(0, U256::from(4)), + base_fees(1, U256::from(3)), + ]) + .build(); + mock.advance_block_number(2); // Ensure we have enough blocks for the fee history + + let gas_adjuster_config = GasAdjusterConfig { + internal_enforced_l1_gas_price: Some(l1_gas_price), + internal_enforced_pubdata_price: Some(l1_pubdata_price), + max_base_fee_samples: 1, // Ensure this is less than the number of blocks + num_samples_for_blob_base_fee_estimate: 2, + ..Default::default() + }; + + GasAdjuster::new( + Box::new(mock.into_client()), + gas_adjuster_config, + PubdataSendingMode::Blobs, + L1BatchCommitmentMode::Rollup, + ) + .await + .expect("Failed to create GasAdjuster") + } } diff --git a/core/node/genesis/Cargo.toml b/core/node/genesis/Cargo.toml index 1f274cab877c..71c4c45e9e38 100644 --- a/core/node/genesis/Cargo.toml +++ b/core/node/genesis/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "zksync_node_genesis" +description = "ZKsync node genesis tools" version.workspace = true edition.workspace = true authors.workspace = true @@ -10,7 +11,7 @@ keywords.workspace = true categories.workspace = true [dependencies] -multivm.workspace = true +zksync_multivm.workspace = true vise.workspace = true zksync_types.workspace = true zksync_dal.workspace = true diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 461f208e3012..a04153a63fc6 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -5,12 +5,12 @@ use std::fmt::Formatter; use anyhow::Context as _; -use multivm::utils::get_max_gas_per_pubdata_byte; -use zksync_config::{configs::DatabaseSecrets, GenesisConfig}; +use zksync_config::GenesisConfig; use 
zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SET_CHAIN_ID_EVENT}; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; +use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_eth_client::EthInterface; use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction}; +use zksync_multivm::utils::get_max_gas_per_pubdata_byte; use zksync_system_constants::PRIORITY_EXPIRATION; use zksync_types::{ block::{BlockGasCount, DeployedContract, L1BatchHeader, L2BlockHasher, L2BlockHeader}, @@ -151,6 +151,7 @@ impl GenesisParams { } } +#[derive(Debug)] pub struct GenesisBatchParams { pub root_hash: H256, pub commitment: H256, @@ -220,12 +221,13 @@ pub async fn insert_genesis_batch( .into_iter() .partition(|log_query| log_query.rw_flag); - let storage_logs: Vec> = deduplicated_writes + let storage_logs: Vec = deduplicated_writes .iter() .enumerate() .map(|(index, log)| { TreeInstruction::write( - StorageKey::new(AccountTreeId::new(log.address), u256_to_h256(log.key)), + StorageKey::new(AccountTreeId::new(log.address), u256_to_h256(log.key)) + .hashed_key_u256(), (index + 1) as u64, u256_to_h256(log.written_value), ) @@ -269,6 +271,10 @@ pub async fn insert_genesis_batch( }) } +pub async fn is_genesis_needed(storage: &mut Connection<'_, Core>) -> Result { + Ok(storage.blocks_dal().is_genesis_needed().await?) +} + pub async fn ensure_genesis_state( storage: &mut Connection<'_, Core>, genesis_params: &GenesisParams, @@ -410,15 +416,11 @@ pub async fn create_genesis_l1_batch( // Save chain id transaction into the database // We keep returning anyhow and will refactor it later pub async fn save_set_chain_id_tx( + storage: &mut Connection<'_, Core>, query_client: &dyn EthInterface, diamond_proxy_address: Address, state_transition_manager_address: Address, - database_secrets: &DatabaseSecrets, ) -> anyhow::Result<()> { - let db_url = database_secrets.master_url()?; - let pool = ConnectionPool::::singleton(db_url).build().await?; - let mut storage = pool.connection().await?; - let to = query_client.block_number().await?.as_u64(); let from = to.saturating_sub(PRIORITY_EXPIRATION); let filter = FilterBuilder::default() diff --git a/core/node/genesis/src/utils.rs b/core/node/genesis/src/utils.rs index 7fdbe05da368..a6c9513dbde8 100644 --- a/core/node/genesis/src/utils.rs +++ b/core/node/genesis/src/utils.rs @@ -1,12 +1,12 @@ use std::collections::HashMap; use itertools::Itertools; -use multivm::{ +use zksync_contracts::BaseSystemContracts; +use zksync_dal::{Connection, Core, CoreDal}; +use zksync_multivm::{ circuit_sequencer_api_latest::sort_storage_access::sort_storage_access_queries, zk_evm_latest::aux_structures::{LogQuery as MultiVmLogQuery, Timestamp as MultiVMTimestamp}, }; -use zksync_contracts::BaseSystemContracts; -use zksync_dal::{Connection, Core, CoreDal}; use zksync_system_constants::{DEFAULT_ERA_CHAIN_ID, ETHEREUM_ADDRESS}; use zksync_types::{ block::{DeployedContract, L1BatchTreeData}, @@ -199,7 +199,9 @@ pub(super) async fn insert_system_contracts( let written_storage_keys: Vec<_> = deduplicated_writes .iter() - .map(|log| StorageKey::new(AccountTreeId::new(log.address), u256_to_h256(log.key))) + .map(|log| { + StorageKey::new(AccountTreeId::new(log.address), u256_to_h256(log.key)).hashed_key() + }) .collect(); transaction .storage_logs_dedup_dal() diff --git a/core/node/house_keeper/Cargo.toml b/core/node/house_keeper/Cargo.toml index 62b3605c3857..ed86a713ea25 100644 --- a/core/node/house_keeper/Cargo.toml +++ b/core/node/house_keeper/Cargo.toml @@ 
-1,6 +1,7 @@ [package] name = "zksync_house_keeper" -version = "0.1.0" +description = "ZKsync house keeper" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -13,7 +14,7 @@ categories.workspace = true vise.workspace = true zksync_dal.workspace = true zksync_shared_metrics.workspace = true -prover_dal.workspace = true +zksync_prover_dal.workspace = true zksync_types.workspace = true zksync_config.workspace = true diff --git a/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs b/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs index 2af66a937b33..5db53710733c 100644 --- a/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs +++ b/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs @@ -1,5 +1,5 @@ -use prover_dal::{Prover, ProverDal}; use zksync_dal::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; use crate::{periodic_job::PeriodicJob, prover::metrics::HOUSE_KEEPER_METRICS}; diff --git a/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs b/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs index 8e3134c078f2..02268c60e5f5 100644 --- a/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs +++ b/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs @@ -1,5 +1,5 @@ -use prover_dal::{Prover, ProverDal}; use zksync_dal::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; use crate::{periodic_job::PeriodicJob, prover::metrics::HOUSE_KEEPER_METRICS}; diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs index 886a4c116b89..c554bf4616d3 100644 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs +++ b/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use async_trait::async_trait; -use prover_dal::{Prover, ProverDal}; use zksync_dal::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; use zksync_types::{protocol_version::ProtocolSemanticVersion, prover_dal::JobCountStatistics}; use crate::{ diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs index 04d823252aff..f429367c44a1 100644 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs +++ b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs @@ -1,7 +1,7 @@ use async_trait::async_trait; -use prover_dal::{Prover, ProverDal}; use zksync_config::configs::fri_prover_group::FriProverGroupConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_prover_dal::{Prover, ProverDal}; use crate::{periodic_job::PeriodicJob, prover::metrics::FRI_PROVER_METRICS}; diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs index 487b28491c43..cd124dffaf67 100644 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs +++ b/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use async_trait::async_trait; -use prover_dal::{Prover, ProverDal}; use 
zksync_dal::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, prover_dal::JobCountStatistics, diff --git a/core/node/house_keeper/src/prover/retry_manager/fri_proof_compressor_job_retry_manager.rs b/core/node/house_keeper/src/prover/retry_manager/fri_proof_compressor_job_retry_manager.rs index 4a27993249f0..4d4d8ceed75e 100644 --- a/core/node/house_keeper/src/prover/retry_manager/fri_proof_compressor_job_retry_manager.rs +++ b/core/node/house_keeper/src/prover/retry_manager/fri_proof_compressor_job_retry_manager.rs @@ -1,8 +1,8 @@ use std::time::Duration; use async_trait::async_trait; -use prover_dal::{Prover, ProverDal}; use zksync_dal::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; use crate::{periodic_job::PeriodicJob, prover::metrics::PROVER_FRI_METRICS}; diff --git a/core/node/house_keeper/src/prover/retry_manager/fri_prover_job_retry_manager.rs b/core/node/house_keeper/src/prover/retry_manager/fri_prover_job_retry_manager.rs index f059703a13c5..755944d21634 100644 --- a/core/node/house_keeper/src/prover/retry_manager/fri_prover_job_retry_manager.rs +++ b/core/node/house_keeper/src/prover/retry_manager/fri_prover_job_retry_manager.rs @@ -1,8 +1,8 @@ use std::time::Duration; use async_trait::async_trait; -use prover_dal::{Prover, ProverDal}; use zksync_dal::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; use crate::{periodic_job::PeriodicJob, prover::metrics::SERVER_METRICS}; diff --git a/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs b/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs index 5b418fe64389..817d1e290252 100644 --- a/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs +++ b/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs @@ -1,7 +1,7 @@ use async_trait::async_trait; -use prover_dal::{Prover, ProverDal}; use zksync_config::configs::fri_witness_generator::WitnessGenerationTimeouts; use zksync_dal::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; use zksync_types::prover_dal::StuckJobs; use crate::{ diff --git a/core/node/house_keeper/src/prover/waiting_to_queued_fri_witness_job_mover.rs b/core/node/house_keeper/src/prover/waiting_to_queued_fri_witness_job_mover.rs index bf4e31eee69d..d4d5edc78eb9 100644 --- a/core/node/house_keeper/src/prover/waiting_to_queued_fri_witness_job_mover.rs +++ b/core/node/house_keeper/src/prover/waiting_to_queued_fri_witness_job_mover.rs @@ -1,6 +1,6 @@ use async_trait::async_trait; -use prover_dal::{Prover, ProverDal}; use zksync_dal::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; use crate::{periodic_job::PeriodicJob, prover::metrics::SERVER_METRICS}; diff --git a/core/node/metadata_calculator/Cargo.toml b/core/node/metadata_calculator/Cargo.toml index b694c1d198cd..5b566c09ff68 100644 --- a/core/node/metadata_calculator/Cargo.toml +++ b/core/node/metadata_calculator/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_metadata_calculator" -version = "0.1.0" +description = "ZKsync batch metadata calculator" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -10,7 +11,7 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_crypto.workspace = true +zksync_crypto_primitives.workspace = true zksync_dal.workspace = true 
zksync_health_check.workspace = true zksync_merkle_tree.workspace = true diff --git a/core/node/metadata_calculator/src/api_server/mod.rs b/core/node/metadata_calculator/src/api_server/mod.rs index c90b889df918..c81e2ba74544 100644 --- a/core/node/metadata_calculator/src/api_server/mod.rs +++ b/core/node/metadata_calculator/src/api_server/mod.rs @@ -12,7 +12,7 @@ use axum::{ }; use serde::{Deserialize, Serialize}; use tokio::sync::watch; -use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_health_check::{CheckHealth, Health, HealthStatus}; use zksync_merkle_tree::NoVersionError; use zksync_types::{L1BatchNumber, H256, U256}; @@ -343,7 +343,7 @@ impl AsyncTreeReader { Ok(Json(response)) } - fn create_api_server( + async fn create_api_server( self, bind_address: &SocketAddr, mut stop_receiver: watch::Receiver, @@ -355,10 +355,11 @@ impl AsyncTreeReader { .route("/proofs", routing::post(Self::get_proofs_handler)) .with_state(self); - let server = axum::Server::try_bind(bind_address) - .with_context(|| format!("Failed binding Merkle tree API server to {bind_address}"))? - .serve(app.into_make_service()); - let local_addr = server.local_addr(); + let listener = tokio::net::TcpListener::bind(bind_address) + .await + .with_context(|| format!("Failed binding Merkle tree API server to {bind_address}"))?; + let local_addr = listener.local_addr()?; + let server = axum::serve(listener, app); let server_future = async move { server.with_graceful_shutdown(async move { if stop_receiver.changed().await.is_err() { @@ -387,7 +388,8 @@ impl AsyncTreeReader { bind_address: SocketAddr, stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { - self.create_api_server(&bind_address, stop_receiver)? + self.create_api_server(&bind_address, stop_receiver) + .await? 
.run() .await } diff --git a/core/node/metadata_calculator/src/api_server/tests.rs b/core/node/metadata_calculator/src/api_server/tests.rs index 26782e446f3f..42a3152e6b53 100644 --- a/core/node/metadata_calculator/src/api_server/tests.rs +++ b/core/node/metadata_calculator/src/api_server/tests.rs @@ -17,7 +17,7 @@ use crate::tests::{gen_storage_logs, reset_db_state, run_calculator, setup_calcu async fn merkle_tree_api() { let pool = ConnectionPool::<Core>::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (calculator, _) = setup_calculator(temp_dir.path(), pool.clone()).await; + let (calculator, _) = setup_calculator(temp_dir.path(), pool.clone(), true).await; let api_addr = (Ipv4Addr::LOCALHOST, 0).into(); reset_db_state(&pool, 5).await; @@ -30,6 +30,7 @@ async fn merkle_tree_api() { .await .unwrap() .create_api_server(&api_addr, stop_receiver.clone()) + .await .unwrap(); let local_addr = *api_server.local_addr(); let api_server_task = tokio::spawn(api_server.run()); @@ -114,7 +115,7 @@ async fn api_client_unparesable_response_error() { async fn local_merkle_tree_client() { let pool = ConnectionPool::<Core>::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (calculator, _) = setup_calculator(temp_dir.path(), pool.clone()).await; + let (calculator, _) = setup_calculator(temp_dir.path(), pool.clone(), true).await; reset_db_state(&pool, 5).await; let tree_reader = calculator.tree_reader(); diff --git a/core/node/metadata_calculator/src/helpers.rs b/core/node/metadata_calculator/src/helpers.rs index d6918b7a5e87..b6989afb179f 100644 --- a/core/node/metadata_calculator/src/helpers.rs +++ b/core/node/metadata_calculator/src/helpers.rs @@ -91,9 +91,10 @@ impl MerkleTreeHealthCheck { let weak_reader = Arc::<OnceCell<WeakAsyncTreeReader>>::default(); let weak_reader_for_task = weak_reader.clone(); tokio::spawn(async move { - weak_reader_for_task - .set(reader.wait().await.unwrap().downgrade()) - .ok(); + if let Some(reader) = reader.wait().await { + weak_reader_for_task.set(reader.downgrade()).ok(); + } + // Otherwise, the tree is dropped before getting initialized; this is not an error in this context. }); Self { @@ -393,16 +394,14 @@ impl LazyAsyncTreeReader { self.0.borrow().clone() } - /// Waits until the tree is initialized and returns a reader for it. - pub async fn wait(mut self) -> anyhow::Result<AsyncTreeReader> { + /// Waits until the tree is initialized and returns a reader for it. If the tree is dropped before + /// getting initialized, returns `None`. + pub async fn wait(mut self) -> Option<AsyncTreeReader> { loop { if let Some(reader) = self.0.borrow().clone() { - break Ok(reader); + break Some(reader); } - self.0 .changed() .await .context("Tree dropped without getting ready; not resolving tree reader")?; + self.0.changed().await.ok()?; } } } @@ -613,7 +612,7 @@ impl Delayer { #[cfg_attr(test, derive(PartialEq))] pub(crate) struct L1BatchWithLogs { pub header: L1BatchHeader, - pub storage_logs: Vec<TreeInstruction<StorageKey>>, + pub storage_logs: Vec<TreeInstruction>, mode: MerkleTreeMode, } @@ -689,6 +688,7 @@ impl L1BatchWithLogs { writes .chain(reads) .sorted_by_key(|tree_instruction| tree_instruction.key()) + .map(TreeInstruction::with_hashed_key) .collect() } else { // Otherwise, load writes' data from other tables.
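The hunks above switch `L1BatchWithLogs` and the genesis helpers from passing full `StorageKey` preimages to passing hashed keys into the Merkle tree. A minimal sketch of the new construction, assuming only the calls visible in this diff (`StorageKey::hashed_key_u256()` producing the `U256` tree key, and `TreeInstruction` being keyed by `U256` after the change); the helper name is illustrative:

    use zksync_merkle_tree::TreeInstruction;
    use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256};

    /// Illustrative helper mirroring the `TreeInstruction::write` call sites above.
    fn hashed_write(address: Address, key: H256, leaf_index: u64, value: H256) -> TreeInstruction {
        // The tree is now keyed by the hash of (address, key), not by the preimage itself.
        let tree_key: U256 = StorageKey::new(AccountTreeId::new(address), key).hashed_key_u256();
        TreeInstruction::write(tree_key, leaf_index, value)
    }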
@@ -732,11 +732,11 @@ impl L1BatchWithLogs { connection: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, protective_reads: HashSet<StorageKey>, - ) -> anyhow::Result<Vec<TreeInstruction<StorageKey>>> { + ) -> anyhow::Result<Vec<TreeInstruction>> { let touched_slots_latency = METRICS.start_load_stage(LoadChangesStage::LoadTouchedSlots); let mut touched_slots = connection .storage_logs_dal() - .get_touched_slots_for_l1_batch(l1_batch_number) + .get_touched_slots_for_executed_l1_batch(l1_batch_number) .await .context("cannot fetch touched slots")?; touched_slots_latency.observe_with_count(touched_slots.len()); @@ -759,7 +759,7 @@ impl L1BatchWithLogs { // their further processing. This is not a required step; the logic below works fine without it. // Indeed, extra no-op updates that could be added to `storage_logs` as a consequence of no filtering, // are removed on the Merkle tree level (see the tree domain wrapper). - let log = TreeInstruction::Read(storage_key); + let log = TreeInstruction::Read(storage_key.hashed_key_u256()); storage_logs.insert(storage_key, log); } tracing::debug!( @@ -775,7 +775,7 @@ impl L1BatchWithLogs { if initial_write_batch_for_key <= l1_batch_number { storage_logs.insert( storage_key, - TreeInstruction::write(storage_key, leaf_index, value), + TreeInstruction::write(storage_key.hashed_key_u256(), leaf_index, value), ); } } @@ -787,11 +787,13 @@ #[cfg(test)] mod tests { + use std::collections::HashMap; + use tempfile::TempDir; use zksync_dal::{ConnectionPool, Core}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; - use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; - use zksync_types::{writes::TreeWrite, StorageKey, StorageLog}; + use zksync_prover_interface::inputs::WitnessInputMerklePaths; + use zksync_types::{writes::TreeWrite, StorageKey, StorageLog, U256}; use super::*; use crate::tests::{extend_db_state, gen_storage_logs, mock_config, reset_db_state}; @@ -814,7 +816,7 @@ .unwrap(); let touched_slots = storage .storage_logs_dal() - .get_touched_slots_for_l1_batch(l1_batch_number) + .get_touched_slots_for_executed_l1_batch(l1_batch_number) .await .unwrap(); @@ -846,7 +848,10 @@ ); } - storage_logs.insert(storage_key, TreeInstruction::Read(storage_key)); + storage_logs.insert( + storage_key, + TreeInstruction::Read(storage_key.hashed_key_u256()), + ); } for (storage_key, value) in touched_slots { @@ -855,7 +860,7 @@ let (_, leaf_index) = l1_batches_for_initial_writes[&storage_key.hashed_key()]; storage_logs.insert( storage_key, - TreeInstruction::write(storage_key.hashed_key_u256(), leaf_index, value), ); } } @@ -882,6 +887,19 @@ let mut storage = pool.connection().await.unwrap(); let mut tree_writes = Vec::new(); + // Create a lookup table for storage key preimages + let all_storage_logs = storage + .storage_logs_dal() + .dump_all_storage_logs_for_tests() + .await; + let logs_by_hashed_key: HashMap<_, _> = all_storage_logs + .into_iter() + .map(|log| { + let tree_key = U256::from_little_endian(log.hashed_key.as_bytes()); + (tree_key, log) + }) + .collect(); + // Check equivalence in case `tree_writes` are not present in DB. 
for l1_batch_number in 0..=5 { let l1_batch_number = L1BatchNumber(l1_batch_number); @@ -900,8 +918,8 @@ mod tests { .into_iter() .filter_map(|instruction| match instruction { TreeInstruction::Write(tree_entry) => Some(TreeWrite { - address: *tree_entry.key.address(), - key: *tree_entry.key.key(), + address: logs_by_hashed_key[&tree_entry.key].address.unwrap(), + key: logs_by_hashed_key[&tree_entry.key].key.unwrap(), value: tree_entry.value, leaf_index: tree_entry.leaf_index, }), @@ -1019,7 +1037,7 @@ mod tests { ); } - fn assert_equivalent_witnesses(lhs: PrepareBasicCircuitsJob, rhs: PrepareBasicCircuitsJob) { + fn assert_equivalent_witnesses(lhs: WitnessInputMerklePaths, rhs: WitnessInputMerklePaths) { assert_eq!(lhs.next_enumeration_index(), rhs.next_enumeration_index()); let lhs_paths = lhs.into_merkle_paths(); let rhs_paths = rhs.into_merkle_paths(); diff --git a/core/node/metadata_calculator/src/lib.rs b/core/node/metadata_calculator/src/lib.rs index b57f0dfacb70..451090694b2c 100644 --- a/core/node/metadata_calculator/src/lib.rs +++ b/core/node/metadata_calculator/src/lib.rs @@ -10,7 +10,7 @@ use std::{ use anyhow::Context as _; use tokio::sync::{oneshot, watch}; use zksync_config::configs::{ - chain::OperationsManagerConfig, + chain::{OperationsManagerConfig, StateKeeperConfig}, database::{MerkleTreeConfig, MerkleTreeMode}, }; use zksync_dal::{ConnectionPool, Core}; @@ -89,6 +89,8 @@ pub struct MetadataCalculatorConfig { pub memtable_capacity: usize, /// Timeout to wait for the Merkle tree database to run compaction on stalled writes. pub stalled_writes_timeout: Duration, + /// Whether state keeper writes protective reads when it seals a batch. + pub sealed_batches_have_protective_reads: bool, /// Configuration specific to the Merkle tree recovery. 
pub recovery: MetadataCalculatorRecoveryConfig, } @@ -97,6 +99,7 @@ impl MetadataCalculatorConfig { pub fn for_main_node( merkle_tree_config: &MerkleTreeConfig, operation_config: &OperationsManagerConfig, + state_keeper_config: &StateKeeperConfig, ) -> Self { Self { db_path: merkle_tree_config.path.clone(), @@ -109,6 +112,8 @@ impl MetadataCalculatorConfig { include_indices_and_filters_in_block_cache: false, memtable_capacity: merkle_tree_config.memtable_capacity(), stalled_writes_timeout: merkle_tree_config.stalled_writes_timeout(), + sealed_batches_have_protective_reads: state_keeper_config + .protective_reads_persistence_enabled, // The main node isn't supposed to be recovered yet, so this value doesn't matter much recovery: MetadataCalculatorRecoveryConfig::default(), } @@ -248,7 +253,12 @@ impl MetadataCalculator { self.health_updater .update(MerkleTreeHealth::MainLoop(tree_info).into()); - let updater = TreeUpdater::new(tree, self.max_l1_batches_per_iter, self.object_store); + let updater = TreeUpdater::new( + tree, + self.max_l1_batches_per_iter, + self.object_store, + self.config.sealed_batches_have_protective_reads, + ); updater .loop_updating_tree(self.delayer, &self.pool, stop_receiver) .await diff --git a/core/node/metadata_calculator/src/recovery/mod.rs b/core/node/metadata_calculator/src/recovery/mod.rs index b4e91bf720ee..dcbc0a68af92 100644 --- a/core/node/metadata_calculator/src/recovery/mod.rs +++ b/core/node/metadata_calculator/src/recovery/mod.rs @@ -261,8 +261,10 @@ impl AsyncTreeRecovery { .acquire() .await .context("semaphore is never closed")?; - Self::recover_key_chunk(&tree, snapshot.l2_block, chunk, pool, stop_receiver).await?; - options.events.chunk_recovered(); + if Self::recover_key_chunk(&tree, snapshot.l2_block, chunk, pool, stop_receiver).await? + { + options.events.chunk_recovered(); + } anyhow::Ok(()) }); future::try_join_all(chunk_tasks).await?; @@ -279,7 +281,9 @@ impl AsyncTreeRecovery { let actual_root_hash = tree.root_hash().await; anyhow::ensure!( actual_root_hash == snapshot.expected_root_hash, - "Root hash of recovered tree {actual_root_hash:?} differs from expected root hash {:?}", + "Root hash of recovered tree {actual_root_hash:?} differs from expected root hash {:?}. \ + If pruning is enabled and the tree is initialized some time after node recovery, \ + this is caused by snapshot storage logs getting pruned; this setup is currently not supported", snapshot.expected_root_hash ); let tree = tree.finalize().await?; @@ -336,20 +340,21 @@ impl AsyncTreeRecovery { Ok(output) } + /// Returns `Ok(true)` if the chunk was recovered, `Ok(false)` if the recovery process was interrupted. async fn recover_key_chunk( tree: &Mutex<AsyncTreeRecovery>, snapshot_l2_block: L2BlockNumber, key_chunk: ops::RangeInclusive<H256>, pool: &ConnectionPool<Core>, stop_receiver: &watch::Receiver<bool>, - ) -> anyhow::Result<()> { + ) -> anyhow::Result<bool> { let acquire_connection_latency = RECOVERY_METRICS.chunk_latency[&ChunkRecoveryStage::AcquireConnection].start(); let mut storage = pool.connection_tagged("metadata_calculator").await?; acquire_connection_latency.observe(); if *stop_receiver.borrow() { - return Ok(()); + return Ok(false); } let entries_latency = @@ -366,7 +371,7 @@ impl AsyncTreeRecovery { ); if *stop_receiver.borrow() { - return Ok(()); + return Ok(false); } // Sanity check: all entry keys must be distinct. 
Otherwise, we may end up writing non-final values @@ -396,7 +401,7 @@ impl AsyncTreeRecovery { lock_tree_latency.observe(); if *stop_receiver.borrow() { - return Ok(()); + return Ok(false); } let extend_tree_latency = @@ -406,7 +411,7 @@ impl AsyncTreeRecovery { tracing::debug!( "Extended Merkle tree with entries for chunk {key_chunk:?} in {extend_tree_latency:?}" ); - Ok(()) + Ok(true) } } diff --git a/core/node/metadata_calculator/src/recovery/tests.rs b/core/node/metadata_calculator/src/recovery/tests.rs index f8edd3e5678d..3861e8a5a84e 100644 --- a/core/node/metadata_calculator/src/recovery/tests.rs +++ b/core/node/metadata_calculator/src/recovery/tests.rs @@ -7,7 +7,7 @@ use tempfile::TempDir; use test_casing::{test_casing, Product}; use tokio::sync::mpsc; use zksync_config::configs::{ - chain::OperationsManagerConfig, + chain::{OperationsManagerConfig, StateKeeperConfig}, database::{MerkleTreeConfig, MerkleTreeMode}, }; use zksync_dal::CoreDal; @@ -102,7 +102,7 @@ async fn prepare_recovery_snapshot_with_genesis( // Add all logs from the genesis L1 batch to `logs` so that they cover all state keys. let genesis_logs = storage .storage_logs_dal() - .get_touched_slots_for_l1_batch(L1BatchNumber(0)) + .get_touched_slots_for_executed_l1_batch(L1BatchNumber(0)) .await .unwrap(); let genesis_logs = genesis_logs @@ -113,7 +113,7 @@ async fn prepare_recovery_snapshot_with_genesis( drop(storage); // Ensure that metadata for L1 batch #1 is present in the DB. - let (calculator, _) = setup_calculator(&temp_dir.path().join("init"), pool).await; + let (calculator, _) = setup_calculator(&temp_dir.path().join("init"), pool, true).await; let l1_batch_root_hash = run_calculator(calculator).await; SnapshotRecoveryStatus { @@ -306,6 +306,10 @@ async fn entire_recovery_workflow(case: RecoveryWorkflowCase) { let calculator_config = MetadataCalculatorConfig::for_main_node( &merkle_tree_config, &OperationsManagerConfig { delay_interval: 50 }, + &StateKeeperConfig { + protective_reads_persistence_enabled: true, + ..Default::default() + }, ); let mut calculator = MetadataCalculator::new(calculator_config, None, pool.clone()) .await @@ -358,7 +362,9 @@ async fn entire_recovery_workflow(case: RecoveryWorkflowCase) { .iter() .chain(&new_logs) .enumerate() - .map(|(i, log)| TreeInstruction::write(log.key, i as u64 + 1, log.value)) + .map(|(i, log)| { + TreeInstruction::write(log.key.hashed_key_u256(), i as u64 + 1, log.value) + }) .collect(); let expected_new_root_hash = ZkSyncTree::process_genesis_batch(&all_tree_instructions).root_hash; diff --git a/core/node/metadata_calculator/src/tests.rs b/core/node/metadata_calculator/src/tests.rs index fbdfe6cab322..b878b0c4a533 100644 --- a/core/node/metadata_calculator/src/tests.rs +++ b/core/node/metadata_calculator/src/tests.rs @@ -8,7 +8,7 @@ use tempfile::TempDir; use test_casing::{test_casing, Product}; use tokio::sync::{mpsc, watch}; use zksync_config::configs::{ - chain::OperationsManagerConfig, + chain::{OperationsManagerConfig, StateKeeperConfig}, database::{MerkleTreeConfig, MerkleTreeMode}, }; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; @@ -17,7 +17,7 @@ use zksync_merkle_tree::domain::ZkSyncTree; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::{create_l1_batch, create_l2_block}; use zksync_object_store::{MockObjectStore, ObjectStore}; -use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; +use zksync_prover_interface::inputs::WitnessInputMerklePaths; use zksync_storage::RocksDB; use 
zksync_types::{ block::{L1BatchHeader, L1BatchTreeData}, @@ -57,18 +57,21 @@ pub(super) fn mock_config(db_path: &Path) -> MetadataCalculatorConfig { include_indices_and_filters_in_block_cache: false, memtable_capacity: 16 << 20, // 16 MiB stalled_writes_timeout: Duration::ZERO, // writes should never be stalled in tests + sealed_batches_have_protective_reads: true, recovery: MetadataCalculatorRecoveryConfig::default(), } } +#[test_casing(2, [false, true])] #[tokio::test] -async fn genesis_creation() { +async fn genesis_creation(sealed_protective_reads: bool) { let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (calculator, _) = setup_calculator(temp_dir.path(), pool.clone()).await; + let (calculator, _) = + setup_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; run_calculator(calculator).await; - let (calculator, _) = setup_calculator(temp_dir.path(), pool).await; + let (calculator, _) = setup_calculator(temp_dir.path(), pool, sealed_protective_reads).await; let tree = calculator.create_tree().await.unwrap(); let GenericAsyncTree::Ready(tree) = tree else { @@ -100,11 +103,12 @@ async fn low_level_genesis_creation() { assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(1)); } -#[test_casing(8, Product(([1, 4, 7, 9], [false, true])))] +#[test_casing(16, Product(([1, 4, 7, 9], [false, true], [false, true])))] #[tokio::test] async fn tree_truncation_on_l1_batch_divergence( last_common_l1_batch: u32, overwrite_tree_data: bool, + sealed_protective_reads: bool, ) { const INITIAL_BATCH_COUNT: usize = 10; @@ -113,7 +117,8 @@ async fn tree_truncation_on_l1_batch_divergence( let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let calculator = setup_lightweight_calculator(temp_dir.path(), pool.clone()).await; + let calculator = + setup_lightweight_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; reset_db_state(&pool, INITIAL_BATCH_COUNT).await; run_calculator(calculator).await; @@ -137,7 +142,8 @@ async fn tree_truncation_on_l1_batch_divergence( } } - let calculator = setup_lightweight_calculator(temp_dir.path(), pool.clone()).await; + let calculator = + setup_lightweight_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; let tree = calculator.create_tree().await.unwrap(); let GenericAsyncTree::Ready(mut tree) = tree else { panic!("Unexpected tree state: {tree:?}"); @@ -154,9 +160,12 @@ async fn tree_truncation_on_l1_batch_divergence( assert_eq!(tree.next_l1_batch_number(), last_common_l1_batch + 1); } -#[test_casing(4, [1, 4, 6, 7])] +#[test_casing(8, Product(([1, 4, 6, 7], [false, true])))] #[tokio::test] -async fn tree_truncation_on_l1_batch_divergence_in_pruned_tree(retained_l1_batch: u32) { +async fn tree_truncation_on_l1_batch_divergence_in_pruned_tree( + retained_l1_batch: u32, + sealed_protective_reads: bool, +) { const INITIAL_BATCH_COUNT: usize = 10; const LAST_COMMON_L1_BATCH: L1BatchNumber = L1BatchNumber(6); @@ -164,7 +173,8 @@ async fn tree_truncation_on_l1_batch_divergence_in_pruned_tree(retained_l1_batch let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let calculator = setup_lightweight_calculator(temp_dir.path(), pool.clone()).await; + let calculator = + setup_lightweight_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; 
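// Here and below, the new `sealed_protective_reads` casing parameter selects which component is
// assumed to persist protective reads: `true` corresponds to the state keeper writing them when it
// seals a batch, `false` to the asynchronous protective-reads VM runner (cf. `expected_tree_hash`).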
reset_db_state(&pool, INITIAL_BATCH_COUNT).await; run_calculator(calculator).await; @@ -186,7 +196,8 @@ async fn tree_truncation_on_l1_batch_divergence_in_pruned_tree(retained_l1_batch .unwrap(); } - let calculator = setup_lightweight_calculator(temp_dir.path(), pool.clone()).await; + let calculator = + setup_lightweight_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; let tree = calculator.create_tree().await.unwrap(); let GenericAsyncTree::Ready(mut tree) = tree else { panic!("Unexpected tree state: {tree:?}"); @@ -221,28 +232,30 @@ async fn tree_truncation_on_l1_batch_divergence_in_pruned_tree(retained_l1_batch } } +#[test_casing(2, [false, true])] #[tokio::test] -async fn basic_workflow() { +async fn basic_workflow(sealed_protective_reads: bool) { let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (calculator, object_store) = setup_calculator(temp_dir.path(), pool.clone()).await; + let (calculator, object_store) = + setup_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; reset_db_state(&pool, 1).await; let merkle_tree_hash = run_calculator(calculator).await; // Check the hash against the reference. - let expected_tree_hash = expected_tree_hash(&pool).await; + let expected_tree_hash = expected_tree_hash(&pool, sealed_protective_reads).await; assert_eq!(merkle_tree_hash, expected_tree_hash); - let job: PrepareBasicCircuitsJob = object_store.get(L1BatchNumber(1)).await.unwrap(); + let job: WitnessInputMerklePaths = object_store.get(L1BatchNumber(1)).await.unwrap(); assert!(job.next_enumeration_index() > 0); let merkle_paths: Vec<_> = job.clone().into_merkle_paths().collect(); assert!(!merkle_paths.is_empty() && merkle_paths.len() <= 100); // ^ The exact values depend on ops in genesis block assert!(merkle_paths.iter().all(|log| log.is_write)); - let (calculator, _) = setup_calculator(temp_dir.path(), pool).await; + let (calculator, _) = setup_calculator(temp_dir.path(), pool, sealed_protective_reads).await; let tree = calculator.create_tree().await.unwrap(); let GenericAsyncTree::Ready(tree) = tree else { panic!("Unexpected tree state: {tree:?}"); @@ -250,16 +263,25 @@ async fn basic_workflow() { assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(2)); } -async fn expected_tree_hash(pool: &ConnectionPool) -> H256 { +async fn expected_tree_hash(pool: &ConnectionPool, sealed_protective_reads: bool) -> H256 { let mut storage = pool.connection().await.unwrap(); - let sealed_l1_batch_number = storage - .blocks_dal() - .get_sealed_l1_batch_number() - .await - .unwrap() - .expect("No L1 batches in Postgres"); + let processed_l1_batch_number = if sealed_protective_reads { + storage + .blocks_dal() + .get_sealed_l1_batch_number() + .await + .unwrap() + .expect("No L1 batches in Postgres") + } else { + storage + .vm_runner_dal() + .get_protective_reads_latest_processed_batch() + .await + .unwrap() + .unwrap_or_default() + }; let mut all_logs = vec![]; - for i in 0..=sealed_l1_batch_number.0 { + for i in 0..=processed_l1_batch_number.0 { let logs = L1BatchWithLogs::new(&mut storage, L1BatchNumber(i), MerkleTreeMode::Lightweight) .await @@ -271,12 +293,14 @@ async fn expected_tree_hash(pool: &ConnectionPool) -> H256 { ZkSyncTree::process_genesis_batch(&all_logs).root_hash } +#[test_casing(2, [false, true])] #[tokio::test] -async fn status_receiver_has_correct_states() { +async fn status_receiver_has_correct_states(sealed_protective_reads: bool) { let pool = 
ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (mut calculator, _) = setup_calculator(temp_dir.path(), pool.clone()).await; + let (mut calculator, _) = + setup_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; let tree_health_check = calculator.tree_health_check(); assert_eq!(tree_health_check.name(), "tree"); let health = tree_health_check.check_health().await; @@ -324,19 +348,22 @@ async fn status_receiver_has_correct_states() { .unwrap(); } +#[test_casing(2, [false, true])] #[tokio::test] -async fn multi_l1_batch_workflow() { +async fn multi_l1_batch_workflow(sealed_protective_reads: bool) { let pool = ConnectionPool::::test_pool().await; // Collect all storage logs in a single L1 batch let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (calculator, _) = setup_calculator(temp_dir.path(), pool.clone()).await; + let (calculator, _) = + setup_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; reset_db_state(&pool, 1).await; let root_hash = run_calculator(calculator).await; // Collect the same logs in multiple L1 batches let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (calculator, object_store) = setup_calculator(temp_dir.path(), pool.clone()).await; + let (calculator, object_store) = + setup_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; reset_db_state(&pool, 10).await; let multi_block_root_hash = run_calculator(calculator).await; assert_eq!(multi_block_root_hash, root_hash); @@ -344,7 +371,7 @@ async fn multi_l1_batch_workflow() { let mut prev_index = None; for l1_batch_number in 1..=10 { let l1_batch_number = L1BatchNumber(l1_batch_number); - let job: PrepareBasicCircuitsJob = object_store.get(l1_batch_number).await.unwrap(); + let job: WitnessInputMerklePaths = object_store.get(l1_batch_number).await.unwrap(); let next_enumeration_index = job.next_enumeration_index(); let merkle_paths: Vec<_> = job.into_merkle_paths().collect(); assert!(!merkle_paths.is_empty() && merkle_paths.len() <= 10); @@ -360,16 +387,62 @@ async fn multi_l1_batch_workflow() { } } +#[test_casing(2, [false, true])] +#[tokio::test] +async fn error_on_pruned_next_l1_batch(sealed_protective_reads: bool) { + let pool = ConnectionPool::::test_pool().await; + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let (calculator, _) = + setup_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; + reset_db_state(&pool, 1).await; + run_calculator(calculator).await; + + // Add some new blocks to the storage and mock their partial pruning. + let mut storage = pool.connection().await.unwrap(); + let new_logs = gen_storage_logs(100..200, 10); + extend_db_state(&mut storage, new_logs).await; + storage + .pruning_dal() + .soft_prune_batches_range(L1BatchNumber(5), L2BlockNumber(5)) + .await + .unwrap(); + storage + .pruning_dal() + .hard_prune_batches_range(L1BatchNumber(5), L2BlockNumber(5)) + .await + .unwrap(); + // Sanity check: there should be no pruned batch headers. 
+ let next_l1_batch_header = storage + .blocks_dal() + .get_l1_batch_header(L1BatchNumber(2)) + .await + .unwrap(); + assert!(next_l1_batch_header.is_none()); + + let (calculator, _) = + setup_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; + let (_stop_sender, stop_receiver) = watch::channel(false); + let err = calculator.run(stop_receiver).await.unwrap_err(); + let err = format!("{err:#}"); + assert!( + err.contains("L1 batch #2, next to be processed by the tree, is pruned"), + "{err}" + ); +} + +#[test_casing(2, [false, true])] #[tokio::test] -async fn running_metadata_calculator_with_additional_blocks() { +async fn running_metadata_calculator_with_additional_blocks(sealed_protective_reads: bool) { let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let calculator = setup_lightweight_calculator(temp_dir.path(), pool.clone()).await; + let calculator = + setup_lightweight_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; reset_db_state(&pool, 5).await; run_calculator(calculator).await; - let mut calculator = setup_lightweight_calculator(temp_dir.path(), pool.clone()).await; + let mut calculator = + setup_lightweight_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; let (stop_sx, stop_rx) = watch::channel(false); let (delay_sx, mut delay_rx) = mpsc::unbounded_channel(); calculator.delayer.delay_notifier = delay_sx; @@ -405,7 +478,7 @@ async fn running_metadata_calculator_with_additional_blocks() { .unwrap(); // Switch to the full tree. It should pick up from the same spot and result in the same tree root hash. - let (calculator, _) = setup_calculator(temp_dir.path(), pool).await; + let (calculator, _) = setup_calculator(temp_dir.path(), pool, true).await; let root_hash_for_full_tree = run_calculator(calculator).await; assert_eq!(root_hash_for_full_tree, updated_root_hash); } @@ -418,9 +491,17 @@ async fn shutting_down_calculator() { create_config(temp_dir.path(), MerkleTreeMode::Lightweight); operation_config.delay_interval = 30_000; // ms; chosen to be larger than `RUN_TIMEOUT` - let calculator = - setup_calculator_with_options(&merkle_tree_config, &operation_config, pool.clone(), None) - .await; + let calculator = setup_calculator_with_options( + &merkle_tree_config, + &operation_config, + &StateKeeperConfig { + protective_reads_persistence_enabled: true, + ..Default::default() + }, + pool.clone(), + None, + ) + .await; reset_db_state(&pool, 5).await; @@ -437,10 +518,12 @@ async fn shutting_down_calculator() { async fn test_postgres_backup_recovery( sleep_between_batches: bool, insert_batch_without_metadata: bool, + sealed_protective_reads: bool, ) { let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let calculator = setup_lightweight_calculator(temp_dir.path(), pool.clone()).await; + let calculator = + setup_lightweight_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; reset_db_state(&pool, 5).await; run_calculator(calculator).await; @@ -461,11 +544,22 @@ async fn test_postgres_backup_recovery( .insert_mock_l1_batch(batch_without_metadata) .await .unwrap(); + storage + .vm_runner_dal() + .mark_protective_reads_batch_as_processing(batch_without_metadata.number) + .await + .unwrap(); + storage + .vm_runner_dal() + .mark_protective_reads_batch_as_completed(batch_without_metadata.number) + .await + .unwrap(); 
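// The updater introduced by this diff only advances past batches whose protective reads are
// persisted, so mock batches must be explicitly marked as processed by the VM runner here;
// otherwise the tree would stall on them.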
insert_initial_writes_for_batch(&mut storage, batch_without_metadata.number).await; } drop(storage); - let mut calculator = setup_lightweight_calculator(temp_dir.path(), pool.clone()).await; + let mut calculator = + setup_lightweight_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; let (stop_sx, stop_rx) = watch::channel(false); let (delay_sx, mut delay_rx) = mpsc::unbounded_channel(); calculator.delayer.delay_notifier = delay_sx; @@ -486,6 +580,14 @@ async fn test_postgres_backup_recovery( .insert_mock_l1_batch(batch_header) .await .unwrap(); + txn.vm_runner_dal() + .mark_protective_reads_batch_as_processing(batch_header.number) + .await + .unwrap(); + txn.vm_runner_dal() + .mark_protective_reads_batch_as_completed(batch_header.number) + .await + .unwrap(); insert_initial_writes_for_batch(&mut txn, batch_header.number).await; txn.commit().await.unwrap(); if sleep_between_batches { @@ -512,30 +614,38 @@ async fn test_postgres_backup_recovery( .unwrap(); } +#[test_casing(2, [false, true])] #[tokio::test] -async fn postgres_backup_recovery() { - test_postgres_backup_recovery(false, false).await; +async fn postgres_backup_recovery(sealed_protective_reads: bool) { + test_postgres_backup_recovery(false, false, sealed_protective_reads).await; } +#[test_casing(2, [false, true])] #[tokio::test] -async fn postgres_backup_recovery_with_delay_between_batches() { - test_postgres_backup_recovery(true, false).await; +async fn postgres_backup_recovery_with_delay_between_batches(sealed_protective_reads: bool) { + test_postgres_backup_recovery(true, false, sealed_protective_reads).await; } +#[test_casing(2, [false, true])] #[tokio::test] -async fn postgres_backup_recovery_with_excluded_metadata() { - test_postgres_backup_recovery(false, true).await; +async fn postgres_backup_recovery_with_excluded_metadata(sealed_protective_reads: bool) { + test_postgres_backup_recovery(false, true, sealed_protective_reads).await; } pub(crate) async fn setup_calculator( db_path: &Path, pool: ConnectionPool, + sealed_protective_reads: bool, ) -> (MetadataCalculator, Arc) { let store = MockObjectStore::arc(); let (merkle_tree_config, operation_manager) = create_config(db_path, MerkleTreeMode::Full); let calculator = setup_calculator_with_options( &merkle_tree_config, &operation_manager, + &StateKeeperConfig { + protective_reads_persistence_enabled: sealed_protective_reads, + ..Default::default() + }, pool, Some(store.clone()), ) @@ -546,9 +656,20 @@ pub(crate) async fn setup_calculator( async fn setup_lightweight_calculator( db_path: &Path, pool: ConnectionPool, + sealed_protective_reads: bool, ) -> MetadataCalculator { let (db_config, operation_config) = create_config(db_path, MerkleTreeMode::Lightweight); - setup_calculator_with_options(&db_config, &operation_config, pool, None).await + setup_calculator_with_options( + &db_config, + &operation_config, + &StateKeeperConfig { + protective_reads_persistence_enabled: sealed_protective_reads, + ..Default::default() + }, + pool, + None, + ) + .await } fn create_config( @@ -570,6 +691,7 @@ fn create_config( async fn setup_calculator_with_options( merkle_tree_config: &MerkleTreeConfig, operation_config: &OperationsManagerConfig, + state_keeper_config: &StateKeeperConfig, pool: ConnectionPool, object_store: Option>, ) -> MetadataCalculator { @@ -581,8 +703,11 @@ async fn setup_calculator_with_options( } drop(storage); - let calculator_config = - MetadataCalculatorConfig::for_main_node(merkle_tree_config, operation_config); + let calculator_config = 
MetadataCalculatorConfig::for_main_node( + merkle_tree_config, + operation_config, + state_keeper_config, + ); MetadataCalculator::new(calculator_config, object_store, pool) .await .unwrap() @@ -636,6 +761,11 @@ pub(crate) async fn reset_db_state(pool: &ConnectionPool, num_batches: usi .delete_initial_writes(L1BatchNumber(0)) .await .unwrap(); + storage + .vm_runner_dal() + .delete_protective_reads(L1BatchNumber(0)) + .await + .unwrap(); let logs = gen_storage_logs(0..100, num_batches); extend_db_state(&mut storage, logs).await; @@ -690,6 +820,16 @@ pub(super) async fn extend_db_state_from_l1_batch( .mark_l2_blocks_as_executed_in_l1_batch(batch_number) .await .unwrap(); + storage + .vm_runner_dal() + .mark_protective_reads_batch_as_processing(batch_number) + .await + .unwrap(); + storage + .vm_runner_dal() + .mark_protective_reads_batch_as_completed(batch_number) + .await + .unwrap(); insert_initial_writes_for_batch(storage, batch_number).await; } } @@ -700,7 +840,7 @@ async fn insert_initial_writes_for_batch( ) { let written_non_zero_slots: Vec<_> = connection .storage_logs_dal() - .get_touched_slots_for_l1_batch(l1_batch_number) + .get_touched_slots_for_executed_l1_batch(l1_batch_number) .await .unwrap() .into_iter() @@ -720,6 +860,7 @@ async fn insert_initial_writes_for_batch( .into_iter() .sorted() .filter(|key| !pre_written_slots.contains(&key.hashed_key())) + .map(|key| key.hashed_key()) .collect(); connection .storage_logs_dedup_dal() @@ -814,6 +955,11 @@ async fn remove_l1_batches( .delete_initial_writes(last_l1_batch_to_keep) .await .unwrap(); + storage + .vm_runner_dal() + .delete_protective_reads(last_l1_batch_to_keep) + .await + .unwrap(); batch_headers } @@ -905,9 +1051,12 @@ async fn deduplication_works_as_expected() { } } -#[test_casing(3, [3, 5, 8])] +#[test_casing(6, Product(([3, 5, 8], [false, true])))] #[tokio::test] -async fn l1_batch_divergence_entire_workflow(last_common_l1_batch: u32) { +async fn l1_batch_divergence_entire_workflow( + last_common_l1_batch: u32, + sealed_protective_reads: bool, +) { const INITIAL_BATCH_COUNT: usize = 10; assert!((last_common_l1_batch as usize) < INITIAL_BATCH_COUNT); @@ -915,7 +1064,8 @@ async fn l1_batch_divergence_entire_workflow(last_common_l1_batch: u32) { let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let calculator = setup_lightweight_calculator(temp_dir.path(), pool.clone()).await; + let calculator = + setup_lightweight_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; reset_db_state(&pool, INITIAL_BATCH_COUNT).await; run_calculator(calculator).await; @@ -924,9 +1074,10 @@ async fn l1_batch_divergence_entire_workflow(last_common_l1_batch: u32) { // Extend the state with new L1 batches. 
let logs = gen_storage_logs(100..200, 5); extend_db_state(&mut storage, logs).await; - let expected_root_hash = expected_tree_hash(&pool).await; + let expected_root_hash = expected_tree_hash(&pool, sealed_protective_reads).await; - let calculator = setup_lightweight_calculator(temp_dir.path(), pool.clone()).await; + let calculator = + setup_lightweight_calculator(temp_dir.path(), pool.clone(), sealed_protective_reads).await; let final_root_hash = run_calculator(calculator).await; assert_eq!(final_root_hash, expected_root_hash); } diff --git a/core/node/metadata_calculator/src/updater.rs b/core/node/metadata_calculator/src/updater.rs index 8271865199a8..d0bd2f2b82c0 100644 --- a/core/node/metadata_calculator/src/updater.rs +++ b/core/node/metadata_calculator/src/updater.rs @@ -5,6 +5,7 @@ use std::{ops, sync::Arc, time::Instant}; use anyhow::Context as _; use futures::{future, FutureExt}; use tokio::sync::watch; +use zksync_config::configs::database::MerkleTreeMode; use zksync_dal::{helpers::wait_for_l1_batch, Connection, ConnectionPool, Core, CoreDal}; use zksync_merkle_tree::domain::TreeMetadata; use zksync_object_store::ObjectStore; @@ -24,6 +25,7 @@ pub(super) struct TreeUpdater { tree: AsyncTree, max_l1_batches_per_iter: usize, object_store: Option>, + sealed_batches_have_protective_reads: bool, } impl TreeUpdater { @@ -31,11 +33,13 @@ impl TreeUpdater { tree: AsyncTree, max_l1_batches_per_iter: usize, object_store: Option>, + sealed_batches_have_protective_reads: bool, ) -> Self { Self { tree, max_l1_batches_per_iter, object_store, + sealed_batches_have_protective_reads, } } @@ -84,25 +88,30 @@ impl TreeUpdater { /// is slow for whatever reason. async fn process_multiple_batches( &mut self, - storage: &mut Connection<'_, Core>, + pool: &ConnectionPool, l1_batch_numbers: ops::RangeInclusive, ) -> anyhow::Result { let tree_mode = self.tree.mode(); let start = Instant::now(); tracing::info!("Processing L1 batches #{l1_batch_numbers:?} in {tree_mode:?} mode"); + let mut storage = pool.connection_tagged("metadata_calculator").await?; let first_l1_batch_number = L1BatchNumber(*l1_batch_numbers.start()); let last_l1_batch_number = L1BatchNumber(*l1_batch_numbers.end()); - let mut l1_batch_data = L1BatchWithLogs::new(storage, first_l1_batch_number, tree_mode) - .await - .with_context(|| { - format!("failed fetching tree input for L1 batch #{first_l1_batch_number}") - })?; + let mut l1_batch_data = + L1BatchWithLogs::new(&mut storage, first_l1_batch_number, tree_mode) + .await + .with_context(|| { + format!("failed fetching tree input for L1 batch #{first_l1_batch_number}") + })?; + drop(storage); let mut total_logs = 0; let mut updated_headers = vec![]; for l1_batch_number in l1_batch_numbers { + let mut storage = pool.connection_tagged("metadata_calculator").await?; let l1_batch_number = L1BatchNumber(l1_batch_number); let Some(current_l1_batch_data) = l1_batch_data else { + Self::ensure_not_pruned(&mut storage, l1_batch_number).await?; return Ok(l1_batch_number); }; total_logs += current_l1_batch_data.storage_logs.len(); @@ -111,13 +120,14 @@ impl TreeUpdater { let load_next_l1_batch_task = async { if l1_batch_number < last_l1_batch_number { let next_l1_batch_number = l1_batch_number + 1; - L1BatchWithLogs::new(storage, next_l1_batch_number, tree_mode) - .await - .with_context(|| { - format!( - "failed fetching tree input for L1 batch #{next_l1_batch_number}" - ) - }) + let batch_result = + L1BatchWithLogs::new(&mut storage, next_l1_batch_number, tree_mode).await; + // Drop storage at the 
earliest possible moment so that it doesn't block logic running concurrently, + // such as tree pruning. + drop(storage); + batch_result.with_context(|| { + format!("failed fetching tree input for L1 batch #{next_l1_batch_number}") + }) } else { Ok(None) // Don't need to load the next L1 batch after the last one we're processing. } @@ -130,11 +140,12 @@ impl TreeUpdater { hash: metadata.root_hash, rollup_last_leaf_index: metadata.rollup_last_leaf_index, }; + + let mut storage = pool.connection_tagged("metadata_calculator").await?; storage .blocks_dal() .save_l1_batch_tree_data(l1_batch_number, &tree_data) - .await - .context("failed saving tree data")?; + .await?; // ^ Note that `save_l1_batch_tree_data()` will not blindly overwrite changes if L1 batch // metadata already exists; instead, it'll check that the old and new metadata match. // That is, if we run multiple tree instances, we'll get metadata correspondence @@ -144,14 +155,18 @@ impl TreeUpdater { storage .tee_verifier_input_producer_dal() .create_tee_verifier_input_producer_job(l1_batch_number) - .await - .expect("failed to create tee_verifier_input_producer job"); + .await?; // Save the proof generation details to Postgres storage .proof_generation_dal() - .insert_proof_generation_details(l1_batch_number, object_key) + .insert_proof_generation_details(l1_batch_number) + .await?; + storage + .proof_generation_dal() + .save_merkle_paths_artifacts_metadata(l1_batch_number, object_key) .await?; } + drop(storage); save_postgres_latency.observe(); tracing::info!("Updated metadata for L1 batch #{l1_batch_number} in Postgres"); @@ -167,32 +182,62 @@ impl TreeUpdater { Ok(last_l1_batch_number + 1) } + /// Checks whether the requested L1 batch was pruned. Right now, the tree cannot recover from this situation, + /// so we exit with an error if this happens. + async fn ensure_not_pruned( + storage: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + let pruning_info = storage.pruning_dal().get_pruning_info().await?; + anyhow::ensure!( + Some(l1_batch_number) > pruning_info.last_soft_pruned_l1_batch, + "L1 batch #{l1_batch_number}, next to be processed by the tree, is pruned; the tree cannot continue operating" + ); + Ok(()) + } + async fn step( &mut self, - mut storage: Connection<'_, Core>, - next_l1_batch_to_seal: &mut L1BatchNumber, + pool: &ConnectionPool, + next_l1_batch_to_process: &mut L1BatchNumber, ) -> anyhow::Result<()> { - let Some(last_sealed_l1_batch) = storage - .blocks_dal() - .get_sealed_l1_batch_number() - .await - .context("failed loading sealed L1 batch number")? - else { - tracing::trace!("No L1 batches to seal: Postgres storage is empty"); - return Ok(()); + let mut storage = pool.connection_tagged("metadata_calculator").await?; + let last_l1_batch_with_protective_reads = if self.tree.mode() == MerkleTreeMode::Lightweight + || self.sealed_batches_have_protective_reads + { + let Some(last_sealed_l1_batch) = storage + .blocks_dal() + .get_sealed_l1_batch_number() + .await + .context("failed loading sealed L1 batch number")? + else { + tracing::trace!("No L1 batches to seal: Postgres storage is empty"); + return Ok(()); + }; + last_sealed_l1_batch + } else { + storage + .vm_runner_dal() + .get_protective_reads_latest_processed_batch() + .await + .context("failed loading latest L1 batch number with protective reads")? 
+ .unwrap_or_default() }; + drop(storage); + let last_requested_l1_batch = - next_l1_batch_to_seal.0 + self.max_l1_batches_per_iter as u32 - 1; - let last_requested_l1_batch = last_requested_l1_batch.min(last_sealed_l1_batch.0); - let l1_batch_numbers = next_l1_batch_to_seal.0..=last_requested_l1_batch; + next_l1_batch_to_process.0 + self.max_l1_batches_per_iter as u32 - 1; + let last_requested_l1_batch = + last_requested_l1_batch.min(last_l1_batch_with_protective_reads.0); + let l1_batch_numbers = next_l1_batch_to_process.0..=last_requested_l1_batch; if l1_batch_numbers.is_empty() { tracing::trace!( - "No L1 batches to seal: batch numbers range to be loaded {l1_batch_numbers:?} is empty" + "No L1 batches to process: batch numbers range to be loaded {l1_batch_numbers:?} is empty" ); } else { tracing::info!("Updating Merkle tree with L1 batches #{l1_batch_numbers:?}"); - *next_l1_batch_to_seal = self - .process_multiple_batches(&mut storage, l1_batch_numbers) + *next_l1_batch_to_process = self + .process_multiple_batches(pool, l1_batch_numbers) .await?; } Ok(()) @@ -206,10 +251,10 @@ impl TreeUpdater { mut stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { let tree = &mut self.tree; - let mut next_l1_batch_to_seal = tree.next_l1_batch_number(); + let mut next_l1_batch_to_process = tree.next_l1_batch_number(); tracing::info!( "Initialized metadata calculator with {max_batches_per_iter} max L1 batches per iteration. \ - Next L1 batch for Merkle tree: {next_l1_batch_to_seal}", + Next L1 batch for Merkle tree: {next_l1_batch_to_process}", max_batches_per_iter = self.max_l1_batches_per_iter ); @@ -218,19 +263,18 @@ impl TreeUpdater { tracing::info!("Stop signal received, metadata_calculator is shutting down"); break; } - let storage = pool.connection_tagged("metadata_calculator").await?; - let snapshot = *next_l1_batch_to_seal; - self.step(storage, &mut next_l1_batch_to_seal).await?; - let delay = if snapshot == *next_l1_batch_to_seal { + let snapshot = *next_l1_batch_to_process; + self.step(pool, &mut next_l1_batch_to_process).await?; + let delay = if snapshot == *next_l1_batch_to_process { tracing::trace!( - "Metadata calculator (next L1 batch: #{next_l1_batch_to_seal}) \ + "Metadata calculator (next L1 batch: #{next_l1_batch_to_process}) \ didn't make any progress; delaying it using {delayer:?}" ); delayer.wait(&self.tree).left_future() } else { tracing::trace!( - "Metadata calculator (next L1 batch: #{next_l1_batch_to_seal}) made progress from #{snapshot}" + "Metadata calculator (next L1 batch: #{next_l1_batch_to_process}) made progress from #{snapshot}" ); future::ready(()).right_future() }; @@ -380,9 +424,13 @@ impl AsyncTree { let mut storage = pool.connection_tagged("metadata_calculator").await?; self.ensure_genesis(&mut storage, earliest_l1_batch).await?; - let next_l1_batch_to_seal = self.next_l1_batch_number(); + let next_l1_batch_to_process = self.next_l1_batch_number(); - let current_db_batch = storage.blocks_dal().get_sealed_l1_batch_number().await?; + let current_db_batch = storage + .vm_runner_dal() + .get_protective_reads_latest_processed_batch() + .await? 
+ .unwrap_or_default(); let last_l1_batch_with_tree_data = storage .blocks_dal() .get_last_l1_batch_number_with_tree_data() @@ -390,7 +438,7 @@ impl AsyncTree { drop(storage); tracing::info!( - "Next L1 batch for Merkle tree: {next_l1_batch_to_seal}, current Postgres L1 batch: {current_db_batch:?}, \ + "Next L1 batch for Merkle tree: {next_l1_batch_to_process}, current Postgres L1 batch: {current_db_batch:?}, \ last L1 batch with metadata: {last_l1_batch_with_tree_data:?}" ); @@ -399,18 +447,18 @@ impl AsyncTree { // responsible for their appearance!), but fortunately most of the updater doesn't depend on it. if let Some(last_l1_batch_with_tree_data) = last_l1_batch_with_tree_data { let backup_lag = - (last_l1_batch_with_tree_data.0 + 1).saturating_sub(next_l1_batch_to_seal.0); + (last_l1_batch_with_tree_data.0 + 1).saturating_sub(next_l1_batch_to_process.0); METRICS.backup_lag.set(backup_lag.into()); - if next_l1_batch_to_seal > last_l1_batch_with_tree_data + 1 { + if next_l1_batch_to_process > last_l1_batch_with_tree_data + 1 { tracing::warn!( - "Next L1 batch of the tree ({next_l1_batch_to_seal}) is greater than last L1 batch with metadata in Postgres \ + "Next L1 batch of the tree ({next_l1_batch_to_process}) is greater than last L1 batch with metadata in Postgres \ ({last_l1_batch_with_tree_data}); this may be a result of restoring Postgres from a snapshot. \ Truncating Merkle tree versions so that this mismatch is fixed..." ); self.roll_back_logs(last_l1_batch_with_tree_data)?; self.save().await?; - tracing::info!("Truncated Merkle tree to L1 batch #{next_l1_batch_to_seal}"); + tracing::info!("Truncated Merkle tree to L1 batch #{next_l1_batch_to_process}"); } self.ensure_no_l1_batch_divergence(pool).await?; diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index d48522fb8116..640000c6a7d8 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_node_framework" -version = "0.1.0" +description = "ZKsync node framework" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -10,11 +11,12 @@ keywords.workspace = true categories.workspace = true [dependencies] -prometheus_exporter.workspace = true +zksync_node_framework_derive.workspace = true +zksync_vlog.workspace = true zksync_types.workspace = true zksync_health_check.workspace = true zksync_dal.workspace = true -prover_dal.workspace = true +zksync_prover_dal.workspace = true zksync_db_connection.workspace = true zksync_config.workspace = true zksync_protobuf_config.workspace = true @@ -33,6 +35,8 @@ zksync_commitment_generator.workspace = true zksync_house_keeper.workspace = true zksync_node_fee_model.workspace = true zksync_eth_sender.workspace = true +zksync_da_client.workspace = true +zksync_da_dispatcher.workspace = true zksync_block_reverter.workspace = true zksync_state_keeper.workspace = true zksync_consistency_checker.workspace = true @@ -46,7 +50,11 @@ zksync_queued_job_processor.workspace = true zksync_reorg_detector.workspace = true zksync_vm_runner.workspace = true zksync_node_db_pruner.workspace = true +zksync_base_token_adjuster.workspace = true +zksync_node_storage_init.workspace = true +zksync_external_price_api.workspace = true +pin-project-lite.workspace = true tracing.workspace = true thiserror.workspace = true async-trait.workspace = true @@ -57,5 +65,6 @@ ctrlc.workspace = true [dev-dependencies] zksync_env_config.workspace = true -vlog.workspace = true 
assert_matches.workspace = true +# For running UI tests for proc macro +trybuild.workspace = true diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index f0cb8417ff97..38f989bda85f 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -50,7 +50,7 @@ use zksync_node_framework::{ server::{Web3ServerLayer, Web3ServerOptionalConfig}, tree_api_client::TreeApiClientLayer, tx_sender::{PostgresStorageCachesConfig, TxSenderLayer}, - tx_sink::TxSinkLayer, + tx_sink::MasterPoolSinkLayer, }, }, service::{ZkStackService, ZkStackServiceBuilder, ZkStackServiceError}, @@ -135,9 +135,11 @@ impl MainNodeBuilder { fn add_metadata_calculator_layer(mut self) -> anyhow::Result { let merkle_tree_env_config = DBConfig::from_env()?.merkle_tree; let operations_manager_env_config = OperationsManagerConfig::from_env()?; + let state_keeper_env_config = StateKeeperConfig::from_env()?; let metadata_calculator_config = MetadataCalculatorConfig::for_main_node( &merkle_tree_env_config, &operations_manager_env_config, + &state_keeper_env_config, ); self.node .add_layer(MetadataCalculatorLayer::new(metadata_calculator_config)); @@ -213,7 +215,7 @@ impl MainNodeBuilder { let wallets = Wallets::from_env()?; // On main node we always use master pool sink. - self.node.add_layer(TxSinkLayer::MasterPoolSink); + self.node.add_layer(MasterPoolSinkLayer); self.node.add_layer(TxSenderLayer::new( TxSenderConfig::new( &state_keeper_config, @@ -302,6 +304,7 @@ impl MainNodeBuilder { rpc_config.websocket_requests_per_minute_limit(), ), replication_lag_limit: circuit_breaker_config.replication_lag_limit(), + with_extended_tracing: rpc_config.extended_api_tracing, ..Default::default() }; self.node.add_layer(Web3ServerLayer::ws( @@ -379,11 +382,11 @@ impl MainNodeBuilder { fn main() -> anyhow::Result<()> { let observability_config = ObservabilityConfig::from_env().context("ObservabilityConfig::from_env()")?; - let log_format: vlog::LogFormat = observability_config + let log_format: zksync_vlog::LogFormat = observability_config .log_format .parse() .context("Invalid log format")?; - let _guard = vlog::ObservabilityBuilder::new() + let _guard = zksync_vlog::ObservabilityBuilder::new() .with_log_format(log_format) .build(); diff --git a/core/node/node_framework/examples/showcase.rs b/core/node/node_framework/examples/showcase.rs index 98baa5bc9683..3dbb576c1935 100644 --- a/core/node/node_framework/examples/showcase.rs +++ b/core/node/node_framework/examples/showcase.rs @@ -9,9 +9,10 @@ use std::{ use zksync_node_framework::{ resource::Resource, - service::{ServiceContext, StopReceiver, ZkStackServiceBuilder}, + service::{StopReceiver, ZkStackServiceBuilder}, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// This will be an example of a shared resource. Basically, something that can be used by multiple @@ -31,7 +32,7 @@ struct MemoryDatabase { /// but in real envs we use GCP. Alternatively, we have different resource implementations for /// main node and EN, like `MempoolIO` and `ExternalIO`. /// -/// Whether it makes sense to hdie the actual resource behind a trait often depends on the resource +/// Whether it makes sense to hide the actual resource behind a trait often depends on the resource /// itself. For example, our DAL is massive and cannot realistically be changed easily, so it's OK /// for it to be a concrete resource. 
But for anything that may realistically have two different /// implementations, it's often a good idea to hide it behind a trait. @@ -51,7 +52,7 @@ impl Database for MemoryDatabase { } /// An idiomatic way to create a resource is to prepare a wrapper for it. -/// This way we separate the logic of framework (which is primarily about glueing things together) +/// This way we separate the logic of the framework (which is primarily about gluing things together) /// from an actual logic of the resource. #[derive(Clone)] struct DatabaseResource(pub Arc); @@ -63,8 +64,6 @@ struct DatabaseResource(pub Arc); /// /// For the latter requirement, there exists an `Unique` wrapper that can be used to store non-`Clone` /// resources. It's not used in this example, but it's a useful thing to know about. -/// -/// Finally, there are other wrappers for resources as well, like `ResourceCollection` and `LazyResource`. impl Resource for DatabaseResource { fn name() -> String { // The convention for resource names is `/`. In this case, the scope is `common`, but @@ -162,45 +161,92 @@ impl Task for CheckTask { /// and another layer to fetch it. The benefit here is that if you want to swap the database /// implementation, you only have to inject a different wiring layer for database, and the /// wiring layers for the tasks will remain unchanged. +/// +/// Each wiring layer has to implement the `WiringLayer` trait. +/// It will receive its inputs and has to produce outputs, which will be stored in the node. +/// Added resources will be available for the layers that are added after this one, +/// and added tasks will be launched once the wiring completes. +/// +/// Inputs and outputs for the layers are defined by the [`FromContext`] and [`IntoContext`] +/// traits, respectively. These traits have a few ready implementations, for example: +/// +/// - `()` can be used if you don't need inputs or don't produce outputs +/// - Any type `T` or `Option` that implements `Resource` also implements both [`FromContext`] +/// and [`IntoContext`]. This can be handy if you work with a single resource. +/// - Otherwise, the most convenient way is to define a struct that will hold all the inputs/outputs +/// and derive [`FromContext`] and [`IntoContext`] for it. +/// +/// See the trait documentation for more detail. struct DatabaseLayer; +/// Here we use a derive macro to define outputs for our layer. +#[derive(IntoContext)] +struct DatabaseLayerOutput { + pub db: DatabaseResource, +} + #[async_trait::async_trait] impl WiringLayer for DatabaseLayer { + // We don't need any input for this layer. + type Input = (); + // We will produce a database resource. + type Output = DatabaseLayerOutput; + fn layer_name(&self) -> &'static str { "database_layer" } /// `wire` method will be invoked by the service before the tasks are started. - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { let database = Arc::new(MemoryDatabase { data: Arc::new(Mutex::new(HashMap::new())), }); // We add the resource to the service context. This way it will be available for the tasks. - context.insert_resource(DatabaseResource(database))?; - Ok(()) + Ok(DatabaseLayerOutput { + db: DatabaseResource(database), + }) } } /// Layer where we add tasks.
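To make the `()`/`Option` conveniences listed above concrete, here is a minimal sketch (reusing `DatabaseResource` from this example; the layer itself is hypothetical) of a layer that consumes one optional resource and produces nothing:

```rust
struct MaybeDbLayer;

#[async_trait::async_trait]
impl WiringLayer for MaybeDbLayer {
    // `Option<T>` works for any `T: Resource`: it is `None` if no earlier layer inserted it.
    type Input = Option<DatabaseResource>;
    // `()` means this layer adds no resources and no tasks.
    type Output = ();

    fn layer_name(&self) -> &'static str {
        "maybe_db_layer"
    }

    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
        if input.is_some() {
            // A real layer could conditionally enable a DB-backed feature here.
        }
        Ok(())
    }
}
```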
struct TasksLayer; +#[derive(FromContext)] +struct TasksLayerInput { + pub db: DatabaseResource, +} + +#[derive(IntoContext)] +struct TasksLayerOutput { + // Note that when using derive macros, all the fields are assumed to be resources by default. + // If you want to add a task, you need to apply a special attribute on the field. + #[context(task)] + pub put_task: PutTask, + #[context(task)] + pub check_task: CheckTask, +} + #[async_trait::async_trait] impl WiringLayer for TasksLayer { + // Here we both receive input and produce output. + type Input = TasksLayerInput; + type Output = TasksLayerOutput; + fn layer_name(&self) -> &'static str { "tasks_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // We fetch the database resource from the context. + async fn wire(self, input: Self::Input) -> Result { + // We received the database resource from the context as `input`. // Note that we don't really care where it comes from or what the actual implementation is. - // We only care whether it's available and bail out if not. - let db = context.get_resource::().await?.0; + let db = input.db.0; let put_task = PutTask { db: db.clone() }; let check_task = CheckTask { db }; // These tasks will be launched by the service once the wiring process is complete. - context.add_task(Box::new(put_task)); - context.add_task(Box::new(check_task)); - Ok(()) + Ok(TasksLayerOutput { + put_task, + check_task, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/base_token/base_token_ratio_persister.rs b/core/node/node_framework/src/implementations/layers/base_token/base_token_ratio_persister.rs new file mode 100644 index 000000000000..d15f9bea0e25 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/base_token/base_token_ratio_persister.rs @@ -0,0 +1,88 @@ +use zksync_base_token_adjuster::BaseTokenRatioPersister; +use zksync_config::{configs::base_token_adjuster::BaseTokenAdjusterConfig, ContractsConfig}; + +use crate::{ + implementations::resources::{ + pools::{MasterPool, PoolResource}, + price_api_client::PriceAPIClientResource, + }, + service::StopReceiver, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// Wiring layer for `BaseTokenRatioPersister` +/// +/// Responsible for orchestrating communications with external API feeds to get ETH<->BaseToken +/// conversion ratios and persisting them both in the DB and on L1.
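Rounding off the showcase: with both layers defined, assembling and running the service takes only a few lines. `add_layer` appears in the examples above; the `build()`/`run()` names are assumptions about the service API rather than confirmed signatures:

```rust
fn main() -> anyhow::Result<()> {
    let mut builder = ZkStackServiceBuilder::new();
    builder.add_layer(DatabaseLayer);
    builder.add_layer(TasksLayer);
    // Wiring runs each layer's `wire()` in order; the service then launches the
    // `PutTask` and `CheckTask` collected from `TasksLayerOutput`.
    builder.build()?.run()?;
    Ok(())
}
```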
+#[derive(Debug)] +pub struct BaseTokenRatioPersisterLayer { + config: BaseTokenAdjusterConfig, + contracts_config: ContractsConfig, +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + #[context(default)] + pub price_api_client: PriceAPIClientResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub persister: BaseTokenRatioPersister, +} + +impl BaseTokenRatioPersisterLayer { + pub fn new(config: BaseTokenAdjusterConfig, contracts_config: ContractsConfig) -> Self { + Self { + config, + contracts_config, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for BaseTokenRatioPersisterLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "base_token_ratio_persister" + } + + async fn wire(self, input: Self::Input) -> Result { + let master_pool = input.master_pool.get().await?; + + let price_api_client = input.price_api_client; + let base_token_addr = self + .contracts_config + .base_token_addr + .expect("base token address is not set"); + + let persister = BaseTokenRatioPersister::new( + master_pool, + self.config, + base_token_addr, + price_api_client.0, + ); + + Ok(Output { persister }) + } +} + +#[async_trait::async_trait] +impl Task for BaseTokenRatioPersister { + fn id(&self) -> TaskId { + "base_token_ratio_persister".into() + } + + async fn run(mut self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await?; + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/base_token/base_token_ratio_provider.rs b/core/node/node_framework/src/implementations/layers/base_token/base_token_ratio_provider.rs new file mode 100644 index 000000000000..4a15895b5241 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/base_token/base_token_ratio_provider.rs @@ -0,0 +1,81 @@ +use std::sync::Arc; + +use zksync_base_token_adjuster::DBBaseTokenRatioProvider; +use zksync_config::BaseTokenAdjusterConfig; + +use crate::{ + implementations::resources::{ + base_token_ratio_provider::BaseTokenRatioProviderResource, + pools::{PoolResource, ReplicaPool}, + }, + service::StopReceiver, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// Wiring layer for `BaseTokenRatioProvider` +/// +/// Responsible for serving the latest ETH<->BaseToken conversion ratio. This layer is only wired if +/// the base token is not ETH. If wired, this layer inserts the BaseTokenRatioProviderResource and kicks +/// off a task to poll the DB for the latest ratio and cache it. +/// +/// If the base token is ETH, a default, no-op impl of the BaseTokenRatioProviderResource is used by other +/// layers to always return a conversion ratio of 1. 
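The `Input` struct above uses `#[context(default)]` on the price-API client. Judging by its usage throughout this diff, the attribute falls back to the resource's `Default` implementation when no layer has inserted it. A condensed sketch of the convention (this struct is illustrative, not part of the PR):

```rust
#[derive(Debug, FromContext)]
#[context(crate = crate)]
pub struct SketchInput {
    // Required: wiring fails (with a `WiringError::ResourceLacking`-style error, as
    // seen in the deleted `eth_sender` code below) if the resource is absent.
    pub master_pool: PoolResource<MasterPool>,
    // Defaulted: resolves to `PriceAPIClientResource::default()` (assumed to be a
    // no-op client) when nothing inserted it.
    #[context(default)]
    pub price_api_client: PriceAPIClientResource,
}
```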
+#[derive(Debug)] +pub struct BaseTokenRatioProviderLayer { + config: BaseTokenAdjusterConfig, +} + +impl BaseTokenRatioProviderLayer { + pub fn new(config: BaseTokenAdjusterConfig) -> Self { + Self { config } + } +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub replica_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub ratio_provider: BaseTokenRatioProviderResource, + #[context(task)] + pub ratio_provider_task: DBBaseTokenRatioProvider, +} + +#[async_trait::async_trait] +impl WiringLayer for BaseTokenRatioProviderLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "base_token_ratio_provider" + } + + async fn wire(self, input: Self::Input) -> Result { + let replica_pool = input.replica_pool.get().await.unwrap(); + + let ratio_provider = DBBaseTokenRatioProvider::new(replica_pool, self.config).await?; + // Cloning the provider preserves the internal state. + Ok(Output { + ratio_provider: Arc::new(ratio_provider.clone()).into(), + ratio_provider_task: ratio_provider, + }) + } +} + +#[async_trait::async_trait] +impl Task for DBBaseTokenRatioProvider { + fn id(&self) -> TaskId { + "base_token_ratio_provider".into() + } + + async fn run(mut self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await?; + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs b/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs new file mode 100644 index 000000000000..14ab568c2f3a --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs @@ -0,0 +1,55 @@ +use std::sync::Arc; + +use zksync_config::configs::ExternalPriceApiClientConfig; +use zksync_external_price_api::coingecko_api::CoinGeckoPriceAPIClient; + +use crate::{ + implementations::resources::price_api_client::PriceAPIClientResource, + wiring_layer::{WiringError, WiringLayer}, + IntoContext, +}; + +/// Wiring layer for `CoinGeckoPriceAPIClient` +/// +/// Responsible for inserting a resource with a client to get base token prices from CoinGecko to be +/// used by the `BaseTokenRatioPersister`. +#[derive(Debug)] +pub struct CoingeckoClientLayer { + config: ExternalPriceApiClientConfig, +} + +impl CoingeckoClientLayer { + /// Identifier of used client type. + /// Can be used to choose the layer for the client based on configuration variables.
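That `CLIENT_NAME` convention suggests config-driven layer selection. A hedged sketch of how a node builder might dispatch on it, using the three constants introduced in this diff ("coingecko", "forced", "no-op"); the `client_name` parameter and the surrounding function are assumptions for illustration:

```rust
fn add_price_client_layer(
    builder: &mut ZkStackServiceBuilder,
    config: ExternalPriceApiClientConfig,
    client_name: &str,
) -> Result<(), WiringError> {
    // Associated consts are valid match patterns, so the layers' `CLIENT_NAME`s
    // can be matched against directly.
    match client_name {
        CoingeckoClientLayer::CLIENT_NAME => {
            builder.add_layer(CoingeckoClientLayer::new(config));
        }
        ForcedPriceClientLayer::CLIENT_NAME => {
            builder.add_layer(ForcedPriceClientLayer::new(config));
        }
        NoOpExternalPriceApiClientLayer::CLIENT_NAME => {
            builder.add_layer(NoOpExternalPriceApiClientLayer);
        }
        other => {
            return Err(WiringError::Configuration(format!(
                "unknown price API client type: {other}"
            )));
        }
    }
    Ok(())
}
```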
+ pub const CLIENT_NAME: &'static str = "coingecko"; +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub price_api_client: PriceAPIClientResource, +} + +impl CoingeckoClientLayer { + pub fn new(config: ExternalPriceApiClientConfig) -> Self { + Self { config } + } +} + +#[async_trait::async_trait] +impl WiringLayer for CoingeckoClientLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "coingecko_api_client" + } + + async fn wire(self, _input: Self::Input) -> Result { + let cg_client = Arc::new(CoinGeckoPriceAPIClient::new(self.config)); + + Ok(Output { + price_api_client: cg_client.into(), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs b/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs new file mode 100644 index 000000000000..67785dc26ed4 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs @@ -0,0 +1,52 @@ +use std::sync::Arc; + +use zksync_config::configs::ExternalPriceApiClientConfig; +use zksync_external_price_api::forced_price_client::ForcedPriceClient; + +use crate::{ + implementations::resources::price_api_client::PriceAPIClientResource, + wiring_layer::{WiringError, WiringLayer}, + IntoContext, +}; + +/// Wiring layer for `ForcedPriceClient` +/// +/// Inserts a resource with a forced configured price to be used by the `BaseTokenRatioPersister`. +#[derive(Debug)] +pub struct ForcedPriceClientLayer { + config: ExternalPriceApiClientConfig, +} + +impl ForcedPriceClientLayer { + pub fn new(config: ExternalPriceApiClientConfig) -> Self { + Self { config } + } + + /// Identifier of used client type. + /// Can be used to choose the layer for the client based on configuration variables. 
+ pub const CLIENT_NAME: &'static str = "forced"; +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub price_api_client: PriceAPIClientResource, +} + +#[async_trait::async_trait] +impl WiringLayer for ForcedPriceClientLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "forced_price_client" + } + + async fn wire(self, _input: Self::Input) -> Result { + let forced_client = Arc::new(ForcedPriceClient::new(self.config)); + + Ok(Output { + price_api_client: forced_client.into(), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/base_token/mod.rs b/core/node/node_framework/src/implementations/layers/base_token/mod.rs new file mode 100644 index 000000000000..5b58527a3d82 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/base_token/mod.rs @@ -0,0 +1,5 @@ +pub mod base_token_ratio_persister; +pub mod base_token_ratio_provider; +pub mod coingecko_client; +pub mod forced_price_client; +pub mod no_op_external_price_api_client; diff --git a/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs b/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs new file mode 100644 index 000000000000..2bf5eda798fa --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs @@ -0,0 +1,45 @@ +use std::sync::Arc; + +use zksync_external_price_api::NoOpPriceAPIClient; + +use crate::{ + implementations::resources::price_api_client::PriceAPIClientResource, + wiring_layer::{WiringError, WiringLayer}, + IntoContext, +}; + +/// Wiring layer for `NoOpExternalPriceApiClient` +/// +/// Inserts a resource with a no-op client to get base token prices to be used by the `BaseTokenRatioPersister`. +#[derive(Debug)] +pub struct NoOpExternalPriceApiClientLayer; + +impl NoOpExternalPriceApiClientLayer { + /// Identifier of used client type. + /// Can be used to choose the layer for the client based on configuration variables. 
+ pub const CLIENT_NAME: &'static str = "no-op"; +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub price_api_client: PriceAPIClientResource, +} + +#[async_trait::async_trait] +impl WiringLayer for NoOpExternalPriceApiClientLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "no_op_external_price_api_client" + } + + async fn wire(self, _input: Self::Input) -> Result { + let no_op_client = Arc::new(NoOpPriceAPIClient {}); + + Ok(Output { + price_api_client: no_op_client.into(), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/batch_status_updater.rs b/core/node/node_framework/src/implementations/layers/batch_status_updater.rs index ba328facc8a3..f9b18a6bf0bf 100644 --- a/core/node/node_framework/src/implementations/layers/batch_status_updater.rs +++ b/core/node/node_framework/src/implementations/layers/batch_status_updater.rs @@ -6,36 +6,57 @@ use crate::{ main_node_client::MainNodeClientResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub pool: PoolResource, + pub client: MainNodeClientResource, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub updater: BatchStatusUpdater, +} + +/// Wiring layer for `BatchStatusUpdater`, part of the external node. #[derive(Debug)] pub struct BatchStatusUpdaterLayer; #[async_trait::async_trait] impl WiringLayer for BatchStatusUpdaterLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "batch_status_updater_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool = context.get_resource::>().await?; - let MainNodeClientResource(client) = context.get_resource().await?; + async fn wire(self, input: Self::Input) -> Result { + let Input { + pool, + client, + app_health, + } = input; - let updater = BatchStatusUpdater::new(client, pool.get().await?); + let updater = BatchStatusUpdater::new(client.0, pool.get().await?); // Insert healthcheck - let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; app_health + .0 .insert_component(updater.health_check()) .map_err(WiringError::internal)?; - // Insert task - context.add_task(Box::new(updater)); - - Ok(()) + Ok(Output { updater }) } } diff --git a/core/node/node_framework/src/implementations/layers/block_reverter.rs b/core/node/node_framework/src/implementations/layers/block_reverter.rs new file mode 100644 index 000000000000..4cfe4212e4d8 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/block_reverter.rs @@ -0,0 +1,95 @@ +use zksync_block_reverter::{BlockReverter, NodeRole}; + +use crate::{ + implementations::resources::{ + pools::{MasterPool, PoolResource}, + reverter::BlockReverterResource, + }, + FromContext, IntoContext, WiringError, WiringLayer, +}; + +/// Layer for the block reverter resource. +/// For documentation on the methods see the corresponding methods in [`BlockReverter`]. 
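The `BlockReverterLayer` defined below exposes chainable enable-methods rather than taking a config struct. A hypothetical external-node configuration using exactly the methods from this diff (the paths and the surrounding function are illustrative):

```rust
fn wire_block_reverter(node: &mut ZkStackServiceBuilder) {
    let mut layer = BlockReverterLayer::new(NodeRole::External);
    // Chaining works because each method returns `&mut Self`.
    layer
        .allow_rolling_back_executed_batches()
        .enable_rolling_back_postgres()
        .enable_rolling_back_merkle_tree("/db/tree".to_owned())
        .enable_rolling_back_state_keeper_cache("/db/state_keeper".to_owned());
    node.add_layer(layer);
}
```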
+#[derive(Debug)] +pub struct BlockReverterLayer { + node_role: NodeRole, + allow_rolling_back_executed_batches: bool, + should_roll_back_postgres: bool, + state_keeper_cache_path: Option, + merkle_tree_path: Option, +} + +impl BlockReverterLayer { + pub fn new(node_role: NodeRole) -> Self { + Self { + node_role, + allow_rolling_back_executed_batches: false, + should_roll_back_postgres: false, + state_keeper_cache_path: None, + merkle_tree_path: None, + } + } + + pub fn allow_rolling_back_executed_batches(&mut self) -> &mut Self { + self.allow_rolling_back_executed_batches = true; + self + } + + pub fn enable_rolling_back_postgres(&mut self) -> &mut Self { + self.should_roll_back_postgres = true; + self + } + + pub fn enable_rolling_back_merkle_tree(&mut self, path: String) -> &mut Self { + self.merkle_tree_path = Some(path); + self + } + + pub fn enable_rolling_back_state_keeper_cache(&mut self, path: String) -> &mut Self { + self.state_keeper_cache_path = Some(path); + self + } +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub block_reverter: BlockReverterResource, +} + +#[async_trait::async_trait] +impl WiringLayer for BlockReverterLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "block_reverter_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get().await?; + let mut block_reverter = BlockReverter::new(self.node_role, pool); + if self.allow_rolling_back_executed_batches { + block_reverter.allow_rolling_back_executed_batches(); + } + if self.should_roll_back_postgres { + block_reverter.enable_rolling_back_postgres(); + } + if let Some(path) = self.merkle_tree_path { + block_reverter.enable_rolling_back_merkle_tree(path); + } + if let Some(path) = self.state_keeper_cache_path { + block_reverter.enable_rolling_back_state_keeper_cache(path); + } + + Ok(Output { + block_reverter: block_reverter.into(), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs b/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs index b8fff34b7e92..b3d31e34c354 100644 --- a/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs +++ b/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs @@ -3,54 +3,64 @@ use zksync_config::configs::chain::CircuitBreakerConfig; use crate::{ implementations::resources::circuit_breakers::CircuitBreakersResource, - service::{ServiceContext, StopReceiver}, - task::{TaskId, UnconstrainedTask}, + service::StopReceiver, + task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; +/// Wiring layer for the circuit breaker checker +/// +/// Expects other layers to insert different components' circuit breakers into the +/// [`zksync_circuit_breaker::CircuitBreakers`] collection using [`CircuitBreakersResource`]. +/// The added task periodically runs checks for all inserted circuit breakers.
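The contract described above has two sides; the registration side shows up later in this diff in the eth-sender layers. Condensed, a producing layer takes the (defaulted) `CircuitBreakersResource` as input and inserts its breaker into the shared collection:

```rust
use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker;
use zksync_dal::{ConnectionPool, Core};

// Mirrors the insertion pattern from the `eth_sender` layers in this PR.
async fn register_failed_tx_breaker(
    circuit_breakers: &CircuitBreakersResource,
    replica_pool: ConnectionPool<Core>,
) {
    circuit_breakers
        .breakers
        .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool }))
        .await;
}
```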
#[derive(Debug)] pub struct CircuitBreakerCheckerLayer(pub CircuitBreakerConfig); +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + #[context(default)] + pub circuit_breakers: CircuitBreakersResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub circuit_breaker_checker: CircuitBreakerChecker, +} + #[async_trait::async_trait] impl WiringLayer for CircuitBreakerCheckerLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "circuit_breaker_checker_layer" } - async fn wire(self: Box, mut node: ServiceContext<'_>) -> Result<(), WiringError> { - // Get resources. - let circuit_breaker_resource = node - .get_resource_or_default::() - .await; - + async fn wire(self, input: Self::Input) -> Result { let circuit_breaker_checker = - CircuitBreakerChecker::new(circuit_breaker_resource.breakers, self.0.sync_interval()); + CircuitBreakerChecker::new(input.circuit_breakers.breakers, self.0.sync_interval()); - // Create and insert task. - let task = CircuitBreakerCheckerTask { + Ok(Output { circuit_breaker_checker, - }; - - node.add_unconstrained_task(Box::new(task)); - Ok(()) + }) } } -#[derive(Debug)] -struct CircuitBreakerCheckerTask { - circuit_breaker_checker: CircuitBreakerChecker, -} - #[async_trait::async_trait] -impl UnconstrainedTask for CircuitBreakerCheckerTask { +impl Task for CircuitBreakerChecker { + fn kind(&self) -> TaskKind { + TaskKind::UnconstrainedTask + } + fn id(&self) -> TaskId { "circuit_breaker_checker".into() } - async fn run_unconstrained( - mut self: Box, - stop_receiver: StopReceiver, - ) -> anyhow::Result<()> { - self.circuit_breaker_checker.run(stop_receiver.0).await + async fn run(mut self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/commitment_generator.rs b/core/node/node_framework/src/implementations/layers/commitment_generator.rs index cc57599759eb..6d68559d4aec 100644 --- a/core/node/node_framework/src/implementations/layers/commitment_generator.rs +++ b/core/node/node_framework/src/implementations/layers/commitment_generator.rs @@ -8,17 +8,36 @@ use crate::{ healthcheck::AppHealthCheckResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; +/// Wiring layer for L1 batch commitment generation +/// +/// Responsible for initializing and running [`CommitmentGenerator`].
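Note how `CircuitBreakerChecker` above implements `Task` directly and overrides `kind()` instead of using the removed `UnconstrainedTask` trait. A minimal template of that pattern (the task itself is hypothetical; the `TaskKind::UnconstrainedTask` semantics are inferred from the `add_unconstrained_task` API it replaces):

```rust
struct Heartbeat;

#[async_trait::async_trait]
impl Task for Heartbeat {
    // Unconstrained tasks are assumed not to be gated on the usual startup
    // preconditions, matching the former `add_unconstrained_task` behavior.
    fn kind(&self) -> TaskKind {
        TaskKind::UnconstrainedTask
    }

    fn id(&self) -> TaskId {
        "heartbeat".into()
    }

    async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
        // Block until the stop signal flips to `true`, as the consensus tasks below do.
        let _ = stop_receiver.0.wait_for(|stop| *stop).await?;
        Ok(())
    }
}
```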
#[derive(Debug)] pub struct CommitmentGeneratorLayer { mode: L1BatchCommitmentMode, max_parallelism: Option>, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub commitment_generator: CommitmentGenerator, +} + impl CommitmentGeneratorLayer { pub fn new(mode: L1BatchCommitmentMode) -> Self { Self { @@ -35,49 +54,44 @@ impl CommitmentGeneratorLayer { #[async_trait::async_trait] impl WiringLayer for CommitmentGeneratorLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "commitment_generator_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool_resource = context.get_resource::>().await?; - + async fn wire(self, input: Self::Input) -> Result { let pool_size = self .max_parallelism .unwrap_or(CommitmentGenerator::default_parallelism()) .get(); - let main_pool = pool_resource.get_custom(pool_size).await?; + let main_pool = input.master_pool.get_custom(pool_size).await?; let mut commitment_generator = CommitmentGenerator::new(main_pool, self.mode); if let Some(max_parallelism) = self.max_parallelism { commitment_generator.set_max_parallelism(max_parallelism); } - let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; - app_health + input + .app_health + .0 .insert_component(commitment_generator.health_check()) .map_err(WiringError::internal)?; - context.add_task(Box::new(CommitmentGeneratorTask { + Ok(Output { commitment_generator, - })); - - Ok(()) + }) } } -#[derive(Debug)] -struct CommitmentGeneratorTask { - commitment_generator: CommitmentGenerator, -} - #[async_trait::async_trait] -impl Task for CommitmentGeneratorTask { +impl Task for CommitmentGenerator { fn id(&self) -> TaskId { "commitment_generator".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.commitment_generator.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/consensus.rs b/core/node/node_framework/src/implementations/layers/consensus.rs deleted file mode 100644 index 8cc7ea4098de..000000000000 --- a/core/node/node_framework/src/implementations/layers/consensus.rs +++ /dev/null @@ -1,177 +0,0 @@ -use anyhow::Context as _; -use zksync_concurrency::{ctx, scope}; -use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; -use zksync_dal::{ConnectionPool, Core}; -use zksync_node_consensus as consensus; -use zksync_node_sync::{ActionQueueSender, SyncState}; -use zksync_web3_decl::client::{DynClient, L2}; - -use crate::{ - implementations::resources::{ - action_queue::ActionQueueSenderResource, - main_node_client::MainNodeClientResource, - pools::{MasterPool, PoolResource}, - sync_state::SyncStateResource, - }, - service::{ServiceContext, StopReceiver}, - task::{Task, TaskId}, - wiring_layer::{WiringError, WiringLayer}, -}; - -#[derive(Debug, Copy, Clone)] -pub enum Mode { - Main, - External, -} - -#[derive(Debug)] -pub struct ConsensusLayer { - pub mode: Mode, - pub config: Option, - pub secrets: Option, -} - -#[async_trait::async_trait] -impl WiringLayer for ConsensusLayer { - fn layer_name(&self) -> &'static str { - "consensus_layer" - } - - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool 
= context - .get_resource::>() - .await? - .get() - .await?; - - match self.mode { - Mode::Main => { - let config = self.config.ok_or_else(|| { - WiringError::Configuration("Missing public consensus config".to_string()) - })?; - let secrets = self.secrets.ok_or_else(|| { - WiringError::Configuration("Missing private consensus config".to_string()) - })?; - let task = MainNodeConsensusTask { - config, - secrets, - pool, - }; - context.add_task(Box::new(task)); - } - Mode::External => { - let main_node_client = context.get_resource::().await?.0; - let sync_state = context.get_resource::().await?.0; - let action_queue_sender = context - .get_resource::() - .await? - .0 - .take() - .ok_or_else(|| { - WiringError::Configuration( - "Action queue sender is taken by another resource".to_string(), - ) - })?; - - let config = match (self.config, self.secrets) { - (Some(cfg), Some(secrets)) => Some((cfg, secrets)), - (Some(_), None) => { - return Err(WiringError::Configuration( - "Consensus config is specified, but secrets are missing".to_string(), - )); - } - (None, _) => { - // Secrets may be unconditionally embedded in some environments, but they are unused - // unless a consensus config is provided. - None - } - }; - - let task = FetcherTask { - config, - pool, - main_node_client, - sync_state, - action_queue_sender, - }; - context.add_task(Box::new(task)); - } - } - Ok(()) - } -} - -#[derive(Debug)] -pub struct MainNodeConsensusTask { - config: ConsensusConfig, - secrets: ConsensusSecrets, - pool: ConnectionPool, -} - -#[async_trait::async_trait] -impl Task for MainNodeConsensusTask { - fn id(&self) -> TaskId { - "consensus".into() - } - - async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - // We instantiate the root context here, since the consensus task is the only user of the - // structured concurrency framework (`MainNodeConsensusTask` and `FetcherTask` are considered mutually - // exclusive). - // Note, however, that awaiting for the `stop_receiver` is related to the root context behavior, - // not the consensus task itself. There may have been any number of tasks running in the root context, - // but we only need to wait for stop signal once, and it will be propagated to all child contexts. - let root_ctx = ctx::root(); - scope::run!(&root_ctx, |ctx, s| async move { - s.spawn_bg(consensus::era::run_main_node( - ctx, - self.config, - self.secrets, - self.pool, - )); - let _ = stop_receiver.0.wait_for(|stop| *stop).await?; - Ok(()) - }) - .await - } -} - -#[derive(Debug)] -pub struct FetcherTask { - config: Option<(ConsensusConfig, ConsensusSecrets)>, - pool: ConnectionPool, - main_node_client: Box>, - sync_state: SyncState, - action_queue_sender: ActionQueueSender, -} - -#[async_trait::async_trait] -impl Task for FetcherTask { - fn id(&self) -> TaskId { - "consensus_fetcher".into() - } - - async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - // We instantiate the root context here, since the consensus task is the only user of the - // structured concurrency framework (`MainNodeConsensusTask` and `FetcherTask` are considered mutually - // exclusive). - // Note, however, that awaiting for the `stop_receiver` is related to the root context behavior, - // not the consensus task itself. There may have been any number of tasks running in the root context, - // but we only need to wait for stop signal once, and it will be propagated to all child contexts. 
- let root_ctx = ctx::root(); - scope::run!(&root_ctx, |ctx, s| async { - s.spawn_bg(consensus::era::run_en( - ctx, - self.config, - self.pool, - self.sync_state, - self.main_node_client, - self.action_queue_sender, - )); - let _ = stop_receiver.0.wait_for(|stop| *stop).await?; - Ok(()) - }) - .await - .context("consensus actor") - } -} diff --git a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs new file mode 100644 index 000000000000..bdb0eae70eea --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs @@ -0,0 +1,129 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, scope}; +use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; +use zksync_dal::{ConnectionPool, Core}; +use zksync_node_consensus as consensus; +use zksync_node_framework_derive::IntoContext; +use zksync_node_sync::{ActionQueueSender, SyncState}; +use zksync_web3_decl::client::{DynClient, L2}; + +use crate::{ + implementations::resources::{ + action_queue::ActionQueueSenderResource, + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + sync_state::SyncStateResource, + }, + service::StopReceiver, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, + FromContext, +}; + +/// Wiring layer for external node consensus component. +#[derive(Debug)] +pub struct ExternalNodeConsensusLayer { + pub config: Option, + pub secrets: Option, +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub main_node_client: MainNodeClientResource, + pub sync_state: SyncStateResource, + pub action_queue_sender: ActionQueueSenderResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub consensus_task: ExternalNodeTask, +} + +#[async_trait::async_trait] +impl WiringLayer for ExternalNodeConsensusLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "external_node_consensus_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get().await?; + + let main_node_client = input.main_node_client.0; + let sync_state = input.sync_state.0; + let action_queue_sender = input.action_queue_sender.0.take().ok_or_else(|| { + WiringError::Configuration( + "Action queue sender is taken by another resource".to_string(), + ) + })?; + + let config = match (self.config, self.secrets) { + (Some(cfg), Some(secrets)) => Some((cfg, secrets)), + (Some(_), None) => { + return Err(WiringError::Configuration( + "Consensus config is specified, but secrets are missing".to_string(), + )); + } + (None, _) => { + // Secrets may be unconditionally embedded in some environments, but they are unused + // unless a consensus config is provided. 
+ None + } + }; + + let consensus_task = ExternalNodeTask { + config, + pool, + main_node_client, + sync_state, + action_queue_sender, + }; + Ok(Output { consensus_task }) + } +} + +#[derive(Debug)] +pub struct ExternalNodeTask { + config: Option<(ConsensusConfig, ConsensusSecrets)>, + pool: ConnectionPool, + main_node_client: Box>, + sync_state: SyncState, + action_queue_sender: ActionQueueSender, +} + +#[async_trait::async_trait] +impl Task for ExternalNodeTask { + fn id(&self) -> TaskId { + "consensus_fetcher".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + // We instantiate the root context here, since the consensus task is the only user of the + // structured concurrency framework (`MainNodeConsensusTask` and `ExternalNodeTask` are considered mutually + // exclusive). + // Note, however, that awaiting for the `stop_receiver` is related to the root context behavior, + // not the consensus task itself. There may have been any number of tasks running in the root context, + // but we only need to wait for stop signal once, and it will be propagated to all child contexts. + let root_ctx = ctx::root(); + scope::run!(&root_ctx, |ctx, s| async { + s.spawn_bg(consensus::era::run_external_node( + ctx, + self.config, + self.pool, + self.sync_state, + self.main_node_client, + self.action_queue_sender, + )); + let _ = stop_receiver.0.wait_for(|stop| *stop).await?; + Ok(()) + }) + .await + .context("consensus actor") + } +} diff --git a/core/node/node_framework/src/implementations/layers/consensus/main_node.rs b/core/node/node_framework/src/implementations/layers/consensus/main_node.rs new file mode 100644 index 000000000000..1ecd5f33c5ab --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/consensus/main_node.rs @@ -0,0 +1,90 @@ +use zksync_concurrency::{ctx, scope}; +use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; +use zksync_dal::{ConnectionPool, Core}; +use zksync_node_consensus as consensus; +use zksync_node_framework_derive::FromContext; + +use crate::{ + implementations::resources::pools::{MasterPool, PoolResource}, + service::StopReceiver, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, + IntoContext, +}; + +/// Wiring layer for main node consensus component. 
+#[derive(Debug)] +pub struct MainNodeConsensusLayer { + pub config: ConsensusConfig, + pub secrets: ConsensusSecrets, +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub consensus_task: MainNodeConsensusTask, +} + +#[async_trait::async_trait] +impl WiringLayer for MainNodeConsensusLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "main_node_consensus_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get().await?; + + let consensus_task = MainNodeConsensusTask { + config: self.config, + secrets: self.secrets, + pool, + }; + + Ok(Output { consensus_task }) + } +} + +#[derive(Debug)] +pub struct MainNodeConsensusTask { + config: ConsensusConfig, + secrets: ConsensusSecrets, + pool: ConnectionPool, +} + +#[async_trait::async_trait] +impl Task for MainNodeConsensusTask { + fn id(&self) -> TaskId { + "consensus".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + // We instantiate the root context here, since the consensus task is the only user of the + // structured concurrency framework (`MainNodeConsensusTask` and `ExternalNodeTask` are considered mutually + // exclusive). + // Note, however, that awaiting for the `stop_receiver` is related to the root context behavior, + // not the consensus task itself. There may have been any number of tasks running in the root context, + // but we only need to wait for stop signal once, and it will be propagated to all child contexts. + let root_ctx = ctx::root(); + scope::run!(&root_ctx, |ctx, s| async move { + s.spawn_bg(consensus::era::run_main_node( + ctx, + self.config, + self.secrets, + self.pool, + )); + let _ = stop_receiver.0.wait_for(|stop| *stop).await?; + Ok(()) + }) + .await + } +} diff --git a/core/node/node_framework/src/implementations/layers/consensus/mod.rs b/core/node/node_framework/src/implementations/layers/consensus/mod.rs new file mode 100644 index 000000000000..59465d21d70d --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/consensus/mod.rs @@ -0,0 +1,4 @@ +pub use self::{external_node::ExternalNodeConsensusLayer, main_node::MainNodeConsensusLayer}; + +pub mod external_node; +pub mod main_node; diff --git a/core/node/node_framework/src/implementations/layers/consistency_checker.rs b/core/node/node_framework/src/implementations/layers/consistency_checker.rs index fb4b6d8f5eed..a9e99eb89ac4 100644 --- a/core/node/node_framework/src/implementations/layers/consistency_checker.rs +++ b/core/node/node_framework/src/implementations/layers/consistency_checker.rs @@ -7,11 +7,13 @@ use crate::{ healthcheck::AppHealthCheckResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; +/// Wiring layer for the `ConsistencyChecker` (used by the external node). 
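With the old `ConsensusLayer { mode, .. }` split into `MainNodeConsensusLayer` and `ExternalNodeConsensusLayer` (re-exported from `mod.rs` above), mode selection moves to the builder code. A hedged sketch; the surrounding function and its parameters are assumptions for illustration:

```rust
use anyhow::Context as _;

fn add_consensus_layer(
    builder: &mut ZkStackServiceBuilder,
    is_main_node: bool,
    config: Option<ConsensusConfig>,
    secrets: Option<ConsensusSecrets>,
) -> anyhow::Result<()> {
    if is_main_node {
        // The main node requires both halves of the configuration up front.
        builder.add_layer(MainNodeConsensusLayer {
            config: config.context("missing consensus config")?,
            secrets: secrets.context("missing consensus secrets")?,
        });
    } else {
        // The external node accepts an optional pair; `wire()` validates that
        // secrets accompany the config when one is present.
        builder.add_layer(ExternalNodeConsensusLayer { config, secrets });
    }
    Ok(())
}
```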
#[derive(Debug)] pub struct ConsistencyCheckerLayer { diamond_proxy_addr: Address, @@ -19,6 +21,22 @@ pub struct ConsistencyCheckerLayer { commitment_mode: L1BatchCommitmentMode, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub l1_client: EthInterfaceResource, + pub master_pool: PoolResource, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub consistency_checker: ConsistencyChecker, +} + impl ConsistencyCheckerLayer { pub fn new( diamond_proxy_addr: Address, @@ -35,16 +53,18 @@ impl ConsistencyCheckerLayer { #[async_trait::async_trait] impl WiringLayer for ConsistencyCheckerLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "consistency_checker_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { // Get resources. - let l1_client = context.get_resource::().await?.0; + let l1_client = input.l1_client.0; - let pool_resource = context.get_resource::>().await?; - let singleton_pool = pool_resource.get_singleton().await?; + let singleton_pool = input.master_pool.get_singleton().await?; let consistency_checker = ConsistencyChecker::new( l1_client, @@ -55,15 +75,15 @@ impl WiringLayer for ConsistencyCheckerLayer { .map_err(WiringError::Internal)? .with_diamond_proxy_addr(self.diamond_proxy_addr); - let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; - app_health + input + .app_health + .0 .insert_component(consistency_checker.health_check().clone()) .map_err(WiringError::internal)?; - // Create and add tasks. - context.add_task(Box::new(consistency_checker)); - - Ok(()) + Ok(Output { + consistency_checker, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/contract_verification_api.rs b/core/node/node_framework/src/implementations/layers/contract_verification_api.rs index 5e76c32ddd53..3f1f76cc1c12 100644 --- a/core/node/node_framework/src/implementations/layers/contract_verification_api.rs +++ b/core/node/node_framework/src/implementations/layers/contract_verification_api.rs @@ -3,37 +3,52 @@ use zksync_dal::{ConnectionPool, Core}; use crate::{ implementations::resources::pools::{MasterPool, PoolResource, ReplicaPool}, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; +/// Wiring layer for contract verification +/// +/// Responsible for initialization of the contract verification server. #[derive(Debug)] pub struct ContractVerificationApiLayer(pub ContractVerifierConfig); +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub replica_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub contract_verification_api_task: ContractVerificationApiTask, +} + #[async_trait::async_trait] impl WiringLayer for ContractVerificationApiLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "contract_verification_api_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let master_pool = context - .get_resource::>() - .await? - .get() - .await?; - let replica_pool = context - .get_resource::>() - .await? 
- .get() - .await?; - context.add_task(Box::new(ContractVerificationApiTask { + async fn wire(self, input: Self::Input) -> Result { + let master_pool = input.master_pool.get().await?; + let replica_pool = input.replica_pool.get().await?; + let contract_verification_api_task = ContractVerificationApiTask { master_pool, replica_pool, config: self.0, - })); - Ok(()) + }; + Ok(Output { + contract_verification_api_task, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs new file mode 100644 index 000000000000..7759da314cc0 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -0,0 +1,84 @@ +use zksync_config::configs::{chain::StateKeeperConfig, da_dispatcher::DADispatcherConfig}; +use zksync_da_dispatcher::DataAvailabilityDispatcher; + +use crate::{ + implementations::resources::{ + da_client::DAClientResource, + pools::{MasterPool, PoolResource}, + }, + service::StopReceiver, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// A layer that wires the data availability dispatcher task. +#[derive(Debug)] +pub struct DataAvailabilityDispatcherLayer { + state_keeper_config: StateKeeperConfig, + da_config: DADispatcherConfig, +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub da_client: DAClientResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub da_dispatcher_task: DataAvailabilityDispatcher, +} + +impl DataAvailabilityDispatcherLayer { + pub fn new(state_keeper_config: StateKeeperConfig, da_config: DADispatcherConfig) -> Self { + Self { + state_keeper_config, + da_config, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for DataAvailabilityDispatcherLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "da_dispatcher_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + // A pool with size 2 is used here because there are 2 functions within a task that execute in parallel + let master_pool = input.master_pool.get_custom(2).await?; + let da_client = input.da_client.0; + + if let Some(limit) = da_client.blob_size_limit() { + if self.state_keeper_config.max_pubdata_per_batch > limit as u64 { + return Err(WiringError::Configuration(format!( + "Max pubdata per batch is greater than the blob size limit: {} > {}", + self.state_keeper_config.max_pubdata_per_batch, limit + ))); + } + } + + let da_dispatcher_task = + DataAvailabilityDispatcher::new(master_pool, self.da_config, da_client); + + Ok(Output { da_dispatcher_task }) + } +} + +#[async_trait::async_trait] +impl Task for DataAvailabilityDispatcher { + fn id(&self) -> TaskId { + "da_dispatcher".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/eth_sender.rs b/core/node/node_framework/src/implementations/layers/eth_sender.rs deleted file mode 100644 index 3cf2cf597c31..000000000000 --- a/core/node/node_framework/src/implementations/layers/eth_sender.rs +++ /dev/null @@ -1,199 +0,0 @@ -use anyhow::Context; -use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker; -use zksync_config::configs::{eth_sender::EthConfig, ContractsConfig}; -use zksync_eth_client::BoundEthInterface; -use 
zksync_eth_sender::{Aggregator, EthTxAggregator, EthTxManager}; -use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; - -use crate::{ - implementations::resources::{ - circuit_breakers::CircuitBreakersResource, - eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, - l1_tx_params::L1TxParamsResource, - object_store::ObjectStoreResource, - pools::{MasterPool, PoolResource, ReplicaPool}, - }, - service::{ServiceContext, StopReceiver}, - task::{Task, TaskId}, - wiring_layer::{WiringError, WiringLayer}, -}; - -#[derive(Debug)] -pub struct EthTxManagerLayer { - eth_sender_config: EthConfig, -} - -impl EthTxManagerLayer { - pub fn new(eth_sender_config: EthConfig) -> Self { - Self { eth_sender_config } - } -} - -#[async_trait::async_trait] -impl WiringLayer for EthTxManagerLayer { - fn layer_name(&self) -> &'static str { - "eth_tx_manager_layer" - } - - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // Get resources. - let master_pool_resource = context.get_resource::>().await?; - let master_pool = master_pool_resource.get().await.unwrap(); - let replica_pool_resource = context.get_resource::>().await?; - let replica_pool = replica_pool_resource.get().await.unwrap(); - - let eth_client = context.get_resource::().await?.0; - let eth_client_blobs = match context - .get_resource::() - .await - { - Ok(BoundEthInterfaceForBlobsResource(client)) => Some(client), - Err(WiringError::ResourceLacking { .. }) => None, - Err(err) => return Err(err), - }; - - let config = self.eth_sender_config.sender.context("sender")?; - - let gas_adjuster = context.get_resource::().await?.0; - - let eth_tx_manager_actor = EthTxManager::new( - master_pool, - config, - gas_adjuster, - eth_client, - eth_client_blobs, - ); - - context.add_task(Box::new(EthTxManagerTask { - eth_tx_manager_actor, - })); - - // Insert circuit breaker. - let CircuitBreakersResource { breakers } = context.get_resource_or_default().await; - breakers - .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool })) - .await; - - Ok(()) - } -} - -#[derive(Debug)] -pub struct EthTxAggregatorLayer { - eth_sender_config: EthConfig, - contracts_config: ContractsConfig, - zksync_network_id: L2ChainId, - l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, -} - -impl EthTxAggregatorLayer { - pub fn new( - eth_sender_config: EthConfig, - contracts_config: ContractsConfig, - zksync_network_id: L2ChainId, - l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, - ) -> Self { - Self { - eth_sender_config, - contracts_config, - zksync_network_id, - l1_batch_commit_data_generator_mode, - } - } -} - -#[async_trait::async_trait] -impl WiringLayer for EthTxAggregatorLayer { - fn layer_name(&self) -> &'static str { - "eth_tx_aggregator_layer" - } - - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // Get resources. - let master_pool_resource = context.get_resource::>().await?; - let master_pool = master_pool_resource.get().await.unwrap(); - let replica_pool_resource = context.get_resource::>().await?; - let replica_pool = replica_pool_resource.get().await.unwrap(); - - let eth_client = context.get_resource::().await?.0; - let eth_client_blobs = match context - .get_resource::() - .await - { - Ok(BoundEthInterfaceForBlobsResource(client)) => Some(client), - Err(WiringError::ResourceLacking { .. }) => None, - Err(err) => return Err(err), - }; - let object_store = context.get_resource::().await?.0; - - // Create and add tasks. 
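One readability win of the rewrite: the deleted code above and below probes for the optional blobs client by matching `WiringError::ResourceLacking`, while the new derive-based inputs in `aggregator.rs`/`manager.rs` express the same intent as a plain `Option` field. The struct below is a condensed illustration mirroring those files:

```rust
#[derive(Debug, FromContext)]
#[context(crate = crate)]
pub struct SketchInput {
    // Required resource: absence is a wiring error.
    pub eth_client: BoundEthInterfaceResource,
    // Optional resource: `None` when no layer inserted a blobs-capable client,
    // replacing the manual `Err(WiringError::ResourceLacking { .. }) => None` dance.
    pub eth_client_blobs: Option<BoundEthInterfaceForBlobsResource>,
}
```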
- let eth_client_blobs_addr = eth_client_blobs - .as_deref() - .map(BoundEthInterface::sender_account); - - let config = self.eth_sender_config.sender.context("sender")?; - let aggregator = Aggregator::new( - config.clone(), - object_store, - eth_client_blobs_addr.is_some(), - self.l1_batch_commit_data_generator_mode, - ); - - let eth_tx_aggregator_actor = EthTxAggregator::new( - master_pool.clone(), - config.clone(), - aggregator, - eth_client.clone(), - self.contracts_config.validator_timelock_addr, - self.contracts_config.l1_multicall3_addr, - self.contracts_config.diamond_proxy_addr, - self.zksync_network_id, - eth_client_blobs_addr, - ) - .await; - - context.add_task(Box::new(EthTxAggregatorTask { - eth_tx_aggregator_actor, - })); - - // Insert circuit breaker. - let CircuitBreakersResource { breakers } = context.get_resource_or_default().await; - breakers - .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool })) - .await; - - Ok(()) - } -} - -#[derive(Debug)] -struct EthTxAggregatorTask { - eth_tx_aggregator_actor: EthTxAggregator, -} - -#[async_trait::async_trait] -impl Task for EthTxAggregatorTask { - fn id(&self) -> TaskId { - "eth_tx_aggregator".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.eth_tx_aggregator_actor.run(stop_receiver.0).await - } -} - -#[derive(Debug)] -struct EthTxManagerTask { - eth_tx_manager_actor: EthTxManager, -} - -#[async_trait::async_trait] -impl Task for EthTxManagerTask { - fn id(&self) -> TaskId { - "eth_tx_manager".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.eth_tx_manager_actor.run(stop_receiver.0).await - } -} diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs new file mode 100644 index 000000000000..96fffcaf6a84 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs @@ -0,0 +1,146 @@ +use anyhow::Context; +use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker; +use zksync_config::configs::{eth_sender::EthConfig, ContractsConfig}; +use zksync_eth_client::BoundEthInterface; +use zksync_eth_sender::{Aggregator, EthTxAggregator}; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; + +use crate::{ + implementations::resources::{ + circuit_breakers::CircuitBreakersResource, + eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, + object_store::ObjectStoreResource, + pools::{MasterPool, PoolResource, ReplicaPool}, + }, + service::StopReceiver, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; + +/// Wiring layer for aggregating L1 batches into `eth_txs` +/// +/// Responsible for initializing and running [`EthTxAggregator`], which aggregates L1 batches +/// into `eth_txs` (such as `CommitBlocks`, `PublishProofBlocksOnchain` or `ExecuteBlock`). +/// These `eth_txs` will be used as a queue for generating signed txs and will later be sent to L1.
+///
+/// ## Requests resources
+///
+/// - `PoolResource<MasterPool>`
+/// - `PoolResource<ReplicaPool>`
+/// - `BoundEthInterfaceResource`
+/// - `BoundEthInterfaceForBlobsResource` (optional)
+/// - `ObjectStoreResource`
+/// - `CircuitBreakersResource` (adds a circuit breaker)
+///
+/// ## Adds tasks
+///
+/// - `EthTxAggregator`
+#[derive(Debug)]
+pub struct EthTxAggregatorLayer {
+    eth_sender_config: EthConfig,
+    contracts_config: ContractsConfig,
+    zksync_network_id: L2ChainId,
+    l1_batch_commit_data_generator_mode: L1BatchCommitmentMode,
+}
+
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub master_pool: PoolResource<MasterPool>,
+    pub replica_pool: PoolResource<ReplicaPool>,
+    pub eth_client: BoundEthInterfaceResource,
+    pub eth_client_blobs: Option<BoundEthInterfaceForBlobsResource>,
+    pub object_store: ObjectStoreResource,
+    #[context(default)]
+    pub circuit_breakers: CircuitBreakersResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub eth_tx_aggregator: EthTxAggregator,
+}
+
+impl EthTxAggregatorLayer {
+    pub fn new(
+        eth_sender_config: EthConfig,
+        contracts_config: ContractsConfig,
+        zksync_network_id: L2ChainId,
+        l1_batch_commit_data_generator_mode: L1BatchCommitmentMode,
+    ) -> Self {
+        Self {
+            eth_sender_config,
+            contracts_config,
+            zksync_network_id,
+            l1_batch_commit_data_generator_mode,
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl WiringLayer for EthTxAggregatorLayer {
+    type Input = Input;
+    type Output = Output;
+
+    fn layer_name(&self) -> &'static str {
+        "eth_tx_aggregator_layer"
+    }
+
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        // Get resources.
+        let master_pool = input.master_pool.get().await.unwrap();
+        let replica_pool = input.replica_pool.get().await.unwrap();
+
+        let eth_client = input.eth_client.0;
+        let eth_client_blobs = input.eth_client_blobs.map(|c| c.0);
+        let object_store = input.object_store.0;
+
+        // Create and add tasks.
+        let eth_client_blobs_addr = eth_client_blobs
+            .as_deref()
+            .map(BoundEthInterface::sender_account);
+
+        let config = self.eth_sender_config.sender.context("sender")?;
+        let aggregator = Aggregator::new(
+            config.clone(),
+            object_store,
+            eth_client_blobs_addr.is_some(),
+            self.l1_batch_commit_data_generator_mode,
+        );
+
+        let eth_tx_aggregator = EthTxAggregator::new(
+            master_pool.clone(),
+            config.clone(),
+            aggregator,
+            eth_client.clone(),
+            self.contracts_config.validator_timelock_addr,
+            self.contracts_config.l1_multicall3_addr,
+            self.contracts_config.diamond_proxy_addr,
+            self.zksync_network_id,
+            eth_client_blobs_addr,
+        )
+        .await;
+
+        // Insert circuit breaker.
+        input
+            .circuit_breakers
+            .breakers
+            .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool }))
+            .await;
+
+        Ok(Output { eth_tx_aggregator })
+    }
+}
+
+#[async_trait::async_trait]
+impl Task for EthTxAggregator {
+    fn id(&self) -> TaskId {
+        "eth_tx_aggregator".into()
+    }
+
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        (*self).run(stop_receiver.0).await
+    }
+}
diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs
new file mode 100644
index 000000000000..e979c372d8e8
--- /dev/null
+++ b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs
@@ -0,0 +1,115 @@
+use anyhow::Context;
+use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker;
+use zksync_config::configs::eth_sender::EthConfig;
+use zksync_eth_sender::EthTxManager;
+
+use crate::{
+    implementations::resources::{
+        circuit_breakers::CircuitBreakersResource,
+        eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource},
+        l1_tx_params::L1TxParamsResource,
+        pools::{MasterPool, PoolResource, ReplicaPool},
+    },
+    service::StopReceiver,
+    task::{Task, TaskId},
+    wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
+};
+
+/// Wiring layer for `eth_txs` management.
+///
+/// Responsible for initializing and running the [`EthTxManager`] component, which manages sending
+/// of `eth_txs` (such as `CommitBlocks`, `PublishProofBlocksOnchain` or `ExecuteBlock`) to L1.
+///
+/// ## Requests resources
+///
+/// - `PoolResource<MasterPool>`
+/// - `PoolResource<ReplicaPool>`
+/// - `BoundEthInterfaceResource`
+/// - `BoundEthInterfaceForBlobsResource` (optional)
+/// - `L1TxParamsResource`
+/// - `CircuitBreakersResource` (adds a circuit breaker)
+///
+/// ## Adds tasks
+///
+/// - `EthTxManager`
+#[derive(Debug)]
+pub struct EthTxManagerLayer {
+    eth_sender_config: EthConfig,
+}
+
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub master_pool: PoolResource<MasterPool>,
+    pub replica_pool: PoolResource<ReplicaPool>,
+    pub eth_client: BoundEthInterfaceResource,
+    pub eth_client_blobs: Option<BoundEthInterfaceForBlobsResource>,
+    pub l1_tx_params: L1TxParamsResource,
+    #[context(default)]
+    pub circuit_breakers: CircuitBreakersResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub eth_tx_manager: EthTxManager,
+}
+
+impl EthTxManagerLayer {
+    pub fn new(eth_sender_config: EthConfig) -> Self {
+        Self { eth_sender_config }
+    }
+}
+
+#[async_trait::async_trait]
+impl WiringLayer for EthTxManagerLayer {
+    type Input = Input;
+    type Output = Output;
+
+    fn layer_name(&self) -> &'static str {
+        "eth_tx_manager_layer"
+    }
+
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        // Get resources.
+        let master_pool = input.master_pool.get().await.unwrap();
+        let replica_pool = input.replica_pool.get().await.unwrap();
+
+        let eth_client = input.eth_client.0;
+        let eth_client_blobs = input.eth_client_blobs.map(|c| c.0);
+
+        let config = self.eth_sender_config.sender.context("sender")?;
+
+        let gas_adjuster = input.l1_tx_params.0;
+
+        let eth_tx_manager = EthTxManager::new(
+            master_pool,
+            config,
+            gas_adjuster,
+            eth_client,
+            eth_client_blobs,
+        );
+
+        // Insert circuit breaker.
+        input
+            .circuit_breakers
+            .breakers
+            .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool }))
+            .await;
+
+        Ok(Output { eth_tx_manager })
+    }
+}
+
+#[async_trait::async_trait]
+impl Task for EthTxManager {
+    fn id(&self) -> TaskId {
+        "eth_tx_manager".into()
+    }
+
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        (*self).run(stop_receiver.0).await
+    }
+}
diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/mod.rs b/core/node/node_framework/src/implementations/layers/eth_sender/mod.rs
new file mode 100644
index 000000000000..e072f5c6a11a
--- /dev/null
+++ b/core/node/node_framework/src/implementations/layers/eth_sender/mod.rs
@@ -0,0 +1,4 @@
+pub mod aggregator;
+pub mod manager;
+
+pub use self::{aggregator::EthTxAggregatorLayer, manager::EthTxManagerLayer};
diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs
index df9319013112..13f593644dc5 100644
--- a/core/node/node_framework/src/implementations/layers/eth_watch.rs
+++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs
@@ -1,27 +1,42 @@
-use std::time::Duration;
-
 use zksync_config::{ContractsConfig, EthWatchConfig};
-use zksync_contracts::governance_contract;
-use zksync_dal::{ConnectionPool, Core};
+use zksync_contracts::{chain_admin_contract, governance_contract};
 use zksync_eth_watch::{EthHttpQueryClient, EthWatch};
-use zksync_types::{ethabi::Contract, Address};
 
 use crate::{
     implementations::resources::{
         eth_interface::EthInterfaceResource,
         pools::{MasterPool, PoolResource},
     },
-    service::{ServiceContext, StopReceiver},
+    service::StopReceiver,
     task::{Task, TaskId},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
+/// Wiring layer for the Ethereum watcher.
+///
+/// Responsible for initializing and running the [`EthWatch`] component, which polls the Ethereum node
+/// for relevant events, such as priority operations (a.k.a. L1 transactions), protocol upgrades, etc.
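The eth_sender split above and the eth_watch rewrite that follows apply the same mechanical recipe, so it is worth spelling out once. Below is a minimal sketch of the pattern, assuming only the items this diff itself introduces (`WiringLayer` with typed `Input`/`Output`, the `FromContext`/`IntoContext` derives, and the `#[context(...)]` attributes); `ExampleLayer` and `ExampleComponent` are illustrative stand-ins, not part of the patch.

use crate::{
    implementations::resources::pools::{MasterPool, PoolResource},
    service::StopReceiver,
    task::{Task, TaskId},
    wiring_layer::{WiringError, WiringLayer},
    FromContext, IntoContext,
};

#[derive(Debug)]
pub struct ExampleLayer;

// Requested resources are now a plain struct; the derive resolves each field
// from the service context, so `wire()` no longer queries resources by hand.
#[derive(Debug, FromContext)]
#[context(crate = crate)]
pub struct Input {
    pub master_pool: PoolResource<MasterPool>,
}

// Produced tasks and resources are declared the same way; a field marked
// `#[context(task)]` is registered as a task, so its type must implement `Task`.
#[derive(Debug, IntoContext)]
#[context(crate = crate)]
pub struct Output {
    #[context(task)]
    pub example: ExampleComponent,
}

#[async_trait::async_trait]
impl WiringLayer for ExampleLayer {
    type Input = Input;
    type Output = Output;

    fn layer_name(&self) -> &'static str {
        "example_layer"
    }

    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
        // Resources arrive pre-resolved; failures here are genuine wiring errors.
        let _pool = input.master_pool.get().await?;
        Ok(Output {
            example: ExampleComponent,
        })
    }
}

// Stand-in for a component such as `EthTxManager` or `EthWatch`.
#[derive(Debug)]
pub struct ExampleComponent;

#[async_trait::async_trait]
impl Task for ExampleComponent {
    fn id(&self) -> TaskId {
        "example".into()
    }

    async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
        // A real component would do work here until the stop signal fires.
        stop_receiver.0.changed().await?;
        Ok(())
    }
}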
#[derive(Debug)] pub struct EthWatchLayer { eth_watch_config: EthWatchConfig, contracts_config: ContractsConfig, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub eth_client: EthInterfaceResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub eth_watch: EthWatch, +} + impl EthWatchLayer { pub fn new(eth_watch_config: EthWatchConfig, contracts_config: ContractsConfig) -> Self { Self { @@ -33,62 +48,49 @@ impl EthWatchLayer { #[async_trait::async_trait] impl WiringLayer for EthWatchLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "eth_watch_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool_resource = context.get_resource::>().await?; - let main_pool = pool_resource.get().await.unwrap(); - - let client = context.get_resource::().await?.0; + async fn wire(self, input: Self::Input) -> Result { + let main_pool = input.master_pool.get().await.unwrap(); + let client = input.eth_client.0; let eth_client = EthHttpQueryClient::new( client, self.contracts_config.diamond_proxy_addr, self.contracts_config .ecosystem_contracts - .map(|a| a.transparent_proxy_admin_addr), + .map(|a| a.state_transition_proxy_addr), + self.contracts_config.chain_admin_addr, self.contracts_config.governance_addr, self.eth_watch_config.confirmations_for_eth_event, ); - context.add_task(Box::new(EthWatchTask { + + let eth_watch = EthWatch::new( + self.contracts_config.diamond_proxy_addr, + &governance_contract(), + &chain_admin_contract(), + Box::new(eth_client), main_pool, - client: eth_client, - governance_contract: governance_contract(), - diamond_proxy_address: self.contracts_config.diamond_proxy_addr, - poll_interval: self.eth_watch_config.poll_interval(), - })); + self.eth_watch_config.poll_interval(), + ) + .await?; - Ok(()) + Ok(Output { eth_watch }) } } -#[derive(Debug)] -struct EthWatchTask { - main_pool: ConnectionPool, - client: EthHttpQueryClient, - governance_contract: Contract, - diamond_proxy_address: Address, - poll_interval: Duration, -} - #[async_trait::async_trait] -impl Task for EthWatchTask { +impl Task for EthWatch { fn id(&self) -> TaskId { "eth_watch".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - let eth_watch = EthWatch::new( - self.diamond_proxy_address, - &self.governance_contract, - Box::new(self.client), - self.main_pool, - self.poll_interval, - ) - .await?; - - eth_watch.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/healtcheck_server.rs b/core/node/node_framework/src/implementations/layers/healtcheck_server.rs index c6138c711083..83a74c63cb45 100644 --- a/core/node/node_framework/src/implementations/layers/healtcheck_server.rs +++ b/core/node/node_framework/src/implementations/layers/healtcheck_server.rs @@ -6,61 +6,73 @@ use zksync_node_api_server::healthcheck::HealthCheckHandle; use crate::{ implementations::resources::healthcheck::AppHealthCheckResource, - service::{ServiceContext, StopReceiver}, - task::{TaskId, UnconstrainedTask}, + service::StopReceiver, + task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; -/// Builder for a health check server. 
+/// Wiring layer for the health check server.
 ///
-/// Spawned task collects all the health checks added by different tasks to the
-/// corresponding resource collection and spawns an HTTP server exposing them.
-///
-/// This layer expects other tasks to add health checks to the `ResourceCollection<HealthCheckResource>`.
-///
-/// ## Effects
-///
-/// - Resolves `ResourceCollection<HealthCheckResource>`.
-/// - Adds `healthcheck_server` to the node.
+/// Expects other layers to insert different components' health checks
+/// into [`AppHealthCheck`], aggregating health using [`AppHealthCheckResource`].
+/// The added task spawns a health check server that only exposes the state provided by other tasks.
 #[derive(Debug)]
 pub struct HealthCheckLayer(pub HealthCheckConfig);
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    #[context(default)]
+    pub app_health_check: AppHealthCheckResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub health_check_task: HealthCheckTask,
+}
+
 #[async_trait::async_trait]
 impl WiringLayer for HealthCheckLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "healthcheck_layer"
     }
 
-    async fn wire(self: Box<Self>, mut node: ServiceContext<'_>) -> Result<(), WiringError> {
-        let AppHealthCheckResource(app_health_check) = node.get_resource_or_default().await;
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let AppHealthCheckResource(app_health_check) = input.app_health_check;
+
         app_health_check.override_limits(self.0.slow_time_limit(), self.0.hard_time_limit());
 
-        let task = HealthCheckTask {
+        let health_check_task = HealthCheckTask {
             config: self.0,
             app_health_check,
         };
 
-        // Healthcheck server only exposes the state provided by other tasks, and also it has to start as soon as possible.
-        node.add_unconstrained_task(Box::new(task));
-        Ok(())
+        Ok(Output { health_check_task })
     }
 }
 
 #[derive(Debug)]
-struct HealthCheckTask {
+pub struct HealthCheckTask {
     config: HealthCheckConfig,
     app_health_check: Arc<AppHealthCheck>,
 }
 
 #[async_trait::async_trait]
-impl UnconstrainedTask for HealthCheckTask {
+impl Task for HealthCheckTask {
+    fn kind(&self) -> TaskKind {
+        TaskKind::UnconstrainedTask
+    }
+
     fn id(&self) -> TaskId {
         "healthcheck_server".into()
     }
 
-    async fn run_unconstrained(
-        mut self: Box<Self>,
-        mut stop_receiver: StopReceiver,
-    ) -> anyhow::Result<()> {
+    async fn run(mut self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
         let handle =
             HealthCheckHandle::spawn_server(self.config.bind_addr(), self.app_health_check.clone());
         stop_receiver.0.changed().await?;
diff --git a/core/node/node_framework/src/implementations/layers/house_keeper.rs b/core/node/node_framework/src/implementations/layers/house_keeper.rs
index 416d80691a31..74314320d815 100644
--- a/core/node/node_framework/src/implementations/layers/house_keeper.rs
+++ b/core/node/node_framework/src/implementations/layers/house_keeper.rs
@@ -15,11 +15,14 @@ use zksync_house_keeper::{
 
 use crate::{
     implementations::resources::pools::{PoolResource, ProverPool, ReplicaPool},
-    service::{ServiceContext, StopReceiver},
+    service::StopReceiver,
     task::{Task, TaskId},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
+/// Wiring layer for `HouseKeeper`, a component responsible for managing prover jobs
+/// and auxiliary server activities.
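Two smaller conveniences do a lot of work in the layers that follow. First, `#[context(task)]` also accepts `Option<_>` fields: the house-keeper `Output` below declares its two archivers that way, which appears to mean a `None` task is simply never registered. Second, `#[context(default)]` falls back to the resource's default value instead of failing when no earlier layer inserted it, as sketched here (the layer owning this `Input` is hypothetical):

use crate::{
    implementations::resources::healthcheck::AppHealthCheckResource,
    FromContext,
};

#[derive(Debug, FromContext)]
#[context(crate = crate)]
pub struct Input {
    // Taken from the context if present, otherwise constructed via `Default`,
    // so consumers do not depend on the order in which layers were added.
    #[context(default)]
    pub app_health: AppHealthCheckResource,
}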
 #[derive(Debug)]
 pub struct HouseKeeperLayer {
     house_keeper_config: HouseKeeperConfig,
@@ -29,6 +32,38 @@ pub struct HouseKeeperLayer {
     fri_proof_compressor_config: FriProofCompressorConfig,
 }
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub replica_pool: PoolResource<ReplicaPool>,
+    pub prover_pool: PoolResource<ProverPool>,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub l1_batch_metrics_reporter: L1BatchMetricsReporter,
+    #[context(task)]
+    pub fri_prover_job_retry_manager: FriProverJobRetryManager,
+    #[context(task)]
+    pub fri_witness_generator_job_retry_manager: FriWitnessGeneratorJobRetryManager,
+    #[context(task)]
+    pub waiting_to_queued_fri_witness_job_mover: WaitingToQueuedFriWitnessJobMover,
+    #[context(task)]
+    pub fri_prover_job_archiver: Option<FriProverJobsArchiver>,
+    #[context(task)]
+    pub fri_prover_gpu_archiver: Option<FriGpuProverArchiver>,
+    #[context(task)]
+    pub fri_witness_generator_stats_reporter: FriWitnessGeneratorQueueReporter,
+    #[context(task)]
+    pub fri_prover_stats_reporter: FriProverQueueReporter,
+    #[context(task)]
+    pub fri_proof_compressor_stats_reporter: FriProofCompressorQueueReporter,
+    #[context(task)]
+    pub fri_proof_compressor_job_retry_manager: FriProofCompressorJobRetryManager,
+}
+
 impl HouseKeeperLayer {
     pub fn new(
         house_keeper_config: HouseKeeperConfig,
@@ -49,27 +84,24 @@ impl HouseKeeperLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for HouseKeeperLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "house_keeper_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        // initialize resources
-        let replica_pool_resource = context.get_resource::<PoolResource<ReplicaPool>>().await?;
-        let replica_pool = replica_pool_resource.get().await?;
-
-        let prover_pool_resource = context.get_resource::<PoolResource<ProverPool>>().await?;
-        let prover_pool = prover_pool_resource.get().await?;
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        // Initialize resources
+        let replica_pool = input.replica_pool.get().await?;
+        let prover_pool = input.prover_pool.get().await?;
 
-        // initialize and add tasks
+        // Initialize and add tasks
         let l1_batch_metrics_reporter = L1BatchMetricsReporter::new(
             self.house_keeper_config
                 .l1_batch_metrics_reporting_interval_ms,
             replica_pool.clone(),
         );
-        context.add_task(Box::new(L1BatchMetricsReporterTask {
-            l1_batch_metrics_reporter,
-        }));
 
         let fri_prover_job_retry_manager = FriProverJobRetryManager::new(
             self.fri_prover_config.max_attempts,
@@ -77,9 +109,6 @@ impl WiringLayer for HouseKeeperLayer {
             self.house_keeper_config.prover_job_retrying_interval_ms,
             prover_pool.clone(),
         );
-        context.add_task(Box::new(FriProverJobRetryManagerTask {
-            fri_prover_job_retry_manager,
-        }));
 
         let fri_witness_gen_job_retry_manager = FriWitnessGeneratorJobRetryManager::new(
             self.fri_witness_generator_config.max_attempts,
@@ -89,46 +118,30 @@ impl WiringLayer for HouseKeeperLayer {
             self.house_keeper_config
                 .witness_generator_job_retrying_interval_ms,
             prover_pool.clone(),
         );
-        context.add_task(Box::new(FriWitnessGeneratorJobRetryManagerTask {
-            fri_witness_gen_job_retry_manager,
-        }));
 
         let waiting_to_queued_fri_witness_job_mover = WaitingToQueuedFriWitnessJobMover::new(
             self.house_keeper_config.witness_job_moving_interval_ms,
             prover_pool.clone(),
         );
-        context.add_task(Box::new(WaitingToQueuedFriWitnessJobMoverTask {
-            waiting_to_queued_fri_witness_job_mover,
-        }));
-
-        if let Some((archiving_interval, archive_after)) =
-            self.house_keeper_config.prover_job_archiver_params()
-        {
-            let
fri_prover_job_archiver = - FriProverJobsArchiver::new(prover_pool.clone(), archiving_interval, archive_after); - context.add_task(Box::new(FriProverJobArchiverTask { - fri_prover_job_archiver, - })); - } - if let Some((archiving_interval, archive_after)) = - self.house_keeper_config.fri_gpu_prover_archiver_params() - { - let fri_prover_gpu_archiver = - FriGpuProverArchiver::new(prover_pool.clone(), archiving_interval, archive_after); - context.add_task(Box::new(FriProverGpuArchiverTask { - fri_prover_gpu_archiver, - })); - } + let fri_prover_job_archiver = self.house_keeper_config.prover_job_archiver_params().map( + |(archiving_interval, archive_after)| { + FriProverJobsArchiver::new(prover_pool.clone(), archiving_interval, archive_after) + }, + ); + + let fri_prover_gpu_archiver = self + .house_keeper_config + .fri_gpu_prover_archiver_params() + .map(|(archiving_interval, archive_after)| { + FriGpuProverArchiver::new(prover_pool.clone(), archiving_interval, archive_after) + }); let fri_witness_generator_stats_reporter = FriWitnessGeneratorQueueReporter::new( prover_pool.clone(), self.house_keeper_config .witness_generator_stats_reporting_interval_ms, ); - context.add_task(Box::new(FriWitnessGeneratorStatsReporterTask { - fri_witness_generator_stats_reporter, - })); let fri_prover_stats_reporter = FriProverQueueReporter::new( self.house_keeper_config.prover_stats_reporting_interval_ms, @@ -136,18 +149,12 @@ impl WiringLayer for HouseKeeperLayer { replica_pool.clone(), self.fri_prover_group_config, ); - context.add_task(Box::new(FriProverStatsReporterTask { - fri_prover_stats_reporter, - })); let fri_proof_compressor_stats_reporter = FriProofCompressorQueueReporter::new( self.house_keeper_config .proof_compressor_stats_reporting_interval_ms, prover_pool.clone(), ); - context.add_task(Box::new(FriProofCompressorStatsReporterTask { - fri_proof_compressor_stats_reporter, - })); let fri_proof_compressor_retry_manager = FriProofCompressorJobRetryManager::new( self.fri_proof_compressor_config.max_attempts, @@ -156,179 +163,128 @@ impl WiringLayer for HouseKeeperLayer { .proof_compressor_job_retrying_interval_ms, prover_pool.clone(), ); - context.add_task(Box::new(FriProofCompressorJobRetryManagerTask { - fri_proof_compressor_retry_manager, - })); - Ok(()) + Ok(Output { + l1_batch_metrics_reporter, + fri_prover_job_retry_manager, + fri_witness_generator_job_retry_manager: fri_witness_gen_job_retry_manager, + waiting_to_queued_fri_witness_job_mover, + fri_prover_job_archiver, + fri_prover_gpu_archiver, + fri_witness_generator_stats_reporter, + fri_prover_stats_reporter, + fri_proof_compressor_stats_reporter, + fri_proof_compressor_job_retry_manager: fri_proof_compressor_retry_manager, + }) } } -#[derive(Debug)] -struct L1BatchMetricsReporterTask { - l1_batch_metrics_reporter: L1BatchMetricsReporter, -} - #[async_trait::async_trait] -impl Task for L1BatchMetricsReporterTask { +impl Task for L1BatchMetricsReporter { fn id(&self) -> TaskId { "l1_batch_metrics_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.l1_batch_metrics_reporter.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriProverJobRetryManagerTask { - fri_prover_job_retry_manager: FriProverJobRetryManager, -} - #[async_trait::async_trait] -impl Task for FriProverJobRetryManagerTask { +impl Task for FriProverJobRetryManager { fn id(&self) -> TaskId { "fri_prover_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) 
-> anyhow::Result<()> { - self.fri_prover_job_retry_manager.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriWitnessGeneratorJobRetryManagerTask { - fri_witness_gen_job_retry_manager: FriWitnessGeneratorJobRetryManager, -} - #[async_trait::async_trait] -impl Task for FriWitnessGeneratorJobRetryManagerTask { +impl Task for FriWitnessGeneratorJobRetryManager { fn id(&self) -> TaskId { "fri_witness_generator_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_witness_gen_job_retry_manager - .run(stop_receiver.0) - .await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct WaitingToQueuedFriWitnessJobMoverTask { - waiting_to_queued_fri_witness_job_mover: WaitingToQueuedFriWitnessJobMover, -} - #[async_trait::async_trait] -impl Task for WaitingToQueuedFriWitnessJobMoverTask { +impl Task for WaitingToQueuedFriWitnessJobMover { fn id(&self) -> TaskId { "waiting_to_queued_fri_witness_job_mover".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.waiting_to_queued_fri_witness_job_mover - .run(stop_receiver.0) - .await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriWitnessGeneratorStatsReporterTask { - fri_witness_generator_stats_reporter: FriWitnessGeneratorQueueReporter, -} - #[async_trait::async_trait] -impl Task for FriWitnessGeneratorStatsReporterTask { +impl Task for FriWitnessGeneratorQueueReporter { fn id(&self) -> TaskId { - "fri_witness_generator_stats_reporter".into() + "fri_witness_generator_queue_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_witness_generator_stats_reporter - .run(stop_receiver.0) - .await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriProverStatsReporterTask { - fri_prover_stats_reporter: FriProverQueueReporter, -} - #[async_trait::async_trait] -impl Task for FriProverStatsReporterTask { +impl Task for FriProverQueueReporter { fn id(&self) -> TaskId { - "fri_prover_stats_reporter".into() + "fri_prover_queue_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_prover_stats_reporter.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriProofCompressorStatsReporterTask { - fri_proof_compressor_stats_reporter: FriProofCompressorQueueReporter, -} - #[async_trait::async_trait] -impl Task for FriProofCompressorStatsReporterTask { +impl Task for FriProofCompressorQueueReporter { fn id(&self) -> TaskId { - "fri_proof_compressor_stats_reporter".into() + "fri_proof_compressor_queue_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_proof_compressor_stats_reporter - .run(stop_receiver.0) - .await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriProofCompressorJobRetryManagerTask { - fri_proof_compressor_retry_manager: FriProofCompressorJobRetryManager, -} - #[async_trait::async_trait] -impl Task for FriProofCompressorJobRetryManagerTask { +impl Task for FriProofCompressorJobRetryManager { fn id(&self) -> TaskId { "fri_proof_compressor_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_proof_compressor_retry_manager - .run(stop_receiver.0) - .await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriProverJobArchiverTask { - fri_prover_job_archiver: 
FriProverJobsArchiver, -} - #[async_trait::async_trait] -impl Task for FriProverJobArchiverTask { +impl Task for FriProverJobsArchiver { fn id(&self) -> TaskId { - "fri_prover_job_archiver".into() + "fri_prover_jobs_archiver".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_prover_job_archiver.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } -struct FriProverGpuArchiverTask { - fri_prover_gpu_archiver: FriGpuProverArchiver, -} - #[async_trait::async_trait] -impl Task for FriProverGpuArchiverTask { +impl Task for FriGpuProverArchiver { fn id(&self) -> TaskId { - "fri_prover_gpu_archiver".into() + "fri_gpu_prover_archiver".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_prover_gpu_archiver.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/l1_batch_commitment_mode_validation.rs b/core/node/node_framework/src/implementations/layers/l1_batch_commitment_mode_validation.rs index e333eda51192..1ef340e08aa7 100644 --- a/core/node/node_framework/src/implementations/layers/l1_batch_commitment_mode_validation.rs +++ b/core/node/node_framework/src/implementations/layers/l1_batch_commitment_mode_validation.rs @@ -3,18 +3,33 @@ use zksync_types::{commitment::L1BatchCommitmentMode, Address}; use crate::{ implementations::resources::eth_interface::EthInterfaceResource, - precondition::Precondition, - service::{ServiceContext, StopReceiver}, - task::TaskId, + service::StopReceiver, + task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; +/// Wiring layer for a prerequisite that checks if the L1 batch commitment mode is valid +/// against L1. 
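Another move repeated throughout the house-keeper rewrite above: the one-field `*Task` wrapper structs are deleted and `Task` is implemented directly on each component. The `self: Box<Self>` receiver keeps the trait object-safe, and `(*self)` moves the component out of the box; because inherent methods take precedence over trait methods during resolution, `(*self).run(...)` dispatches to the component's own `run` rather than recursing. A condensed sketch (the component and its inherent `run` signature are illustrative):

use crate::{
    service::StopReceiver,
    task::{Task, TaskId},
};

#[derive(Debug)]
pub struct QueueReporter;

impl QueueReporter {
    // Inherent `run`, as on the real house-keeper components.
    pub async fn run(self, _stop: tokio::sync::watch::Receiver<bool>) -> anyhow::Result<()> {
        Ok(())
    }
}

#[async_trait::async_trait]
impl Task for QueueReporter {
    fn id(&self) -> TaskId {
        "queue_reporter".into()
    }

    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
        // Moves out of the box and calls the inherent method above, not itself.
        (*self).run(stop_receiver.0).await
    }
}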
#[derive(Debug)] pub struct L1BatchCommitmentModeValidationLayer { diamond_proxy_addr: Address, l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub eth_client: EthInterfaceResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub task: L1BatchCommitmentModeValidationTask, +} + impl L1BatchCommitmentModeValidationLayer { pub fn new( diamond_proxy_addr: Address, @@ -29,31 +44,36 @@ impl L1BatchCommitmentModeValidationLayer { #[async_trait::async_trait] impl WiringLayer for L1BatchCommitmentModeValidationLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "l1_batch_commitment_mode_validation_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let EthInterfaceResource(query_client) = context.get_resource().await?; + async fn wire(self, input: Self::Input) -> Result { + let EthInterfaceResource(query_client) = input.eth_client; let task = L1BatchCommitmentModeValidationTask::new( self.diamond_proxy_addr, self.l1_batch_commit_data_generator_mode, query_client, ); - context.add_precondition(Box::new(task)); - - Ok(()) + Ok(Output { task }) } } #[async_trait::async_trait] -impl Precondition for L1BatchCommitmentModeValidationTask { +impl Task for L1BatchCommitmentModeValidationTask { + fn kind(&self) -> TaskKind { + TaskKind::Precondition + } + fn id(&self) -> TaskId { "l1_batch_commitment_mode_validation".into() } - async fn check(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { (*self).exit_on_success().run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/l1_gas.rs b/core/node/node_framework/src/implementations/layers/l1_gas.rs index d465510eff5d..85e0422cdcb1 100644 --- a/core/node/node_framework/src/implementations/layers/l1_gas.rs +++ b/core/node/node_framework/src/implementations/layers/l1_gas.rs @@ -10,14 +10,18 @@ use zksync_types::fee_model::FeeModelConfig; use crate::{ implementations::resources::{ + base_token_ratio_provider::BaseTokenRatioProviderResource, eth_interface::EthInterfaceResource, fee_input::FeeInputResource, l1_tx_params::L1TxParamsResource, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; +/// Wiring layer for sequencer L1 gas interfaces. +/// Adds several resources that depend on L1 gas price. #[derive(Debug)] pub struct SequencerL1GasLayer { gas_adjuster_config: GasAdjusterConfig, @@ -26,6 +30,25 @@ pub struct SequencerL1GasLayer { state_keeper_config: StateKeeperConfig, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub eth_client: EthInterfaceResource, + /// If not provided, the base token assumed to be ETH, and the ratio will be constant. + #[context(default)] + pub base_token_ratio_provider: BaseTokenRatioProviderResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub fee_input: FeeInputResource, + pub l1_tx_params: L1TxParamsResource, + /// Only runs if someone uses the resources listed above. 
+ #[context(task)] + pub gas_adjuster_task: GasAdjusterTask, +} + impl SequencerL1GasLayer { pub fn new( gas_adjuster_config: GasAdjusterConfig, @@ -44,12 +67,15 @@ impl SequencerL1GasLayer { #[async_trait::async_trait] impl WiringLayer for SequencerL1GasLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "sequencer_l1_gas_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let client = context.get_resource::().await?.0; + async fn wire(self, input: Self::Input) -> Result { + let client = input.eth_client.0; let adjuster = GasAdjuster::new( client, self.gas_adjuster_config, @@ -60,21 +86,23 @@ impl WiringLayer for SequencerL1GasLayer { .context("GasAdjuster::new()")?; let gas_adjuster = Arc::new(adjuster); + let ratio_provider = input.base_token_ratio_provider; + let batch_fee_input_provider = Arc::new(MainNodeFeeInputProvider::new( gas_adjuster.clone(), + ratio_provider.0.clone(), FeeModelConfig::from_state_keeper_config(&self.state_keeper_config), )); - context.insert_resource(FeeInputResource(batch_fee_input_provider))?; - - context.insert_resource(L1TxParamsResource(gas_adjuster.clone()))?; - - context.add_task(Box::new(GasAdjusterTask { gas_adjuster })); - Ok(()) + Ok(Output { + fee_input: batch_fee_input_provider.into(), + l1_tx_params: gas_adjuster.clone().into(), + gas_adjuster_task: GasAdjusterTask { gas_adjuster }, + }) } } #[derive(Debug)] -struct GasAdjusterTask { +pub struct GasAdjusterTask { gas_adjuster: Arc, } diff --git a/core/node/node_framework/src/implementations/layers/main_node_client.rs b/core/node/node_framework/src/implementations/layers/main_node_client.rs index a694eb831330..2f61bf897e5b 100644 --- a/core/node/node_framework/src/implementations/layers/main_node_client.rs +++ b/core/node/node_framework/src/implementations/layers/main_node_client.rs @@ -9,10 +9,11 @@ use crate::{ implementations::resources::{ healthcheck::AppHealthCheckResource, main_node_client::MainNodeClientResource, }, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; +/// Wiring layer for main node client. #[derive(Debug)] pub struct MainNodeClientLayer { url: SensitiveUrl, @@ -20,6 +21,19 @@ pub struct MainNodeClientLayer { l2_chain_id: L2ChainId, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub main_node_client: MainNodeClientResource, +} + impl MainNodeClientLayer { pub fn new(url: SensitiveUrl, rate_limit_rps: NonZeroUsize, l2_chain_id: L2ChainId) -> Self { Self { @@ -32,11 +46,14 @@ impl MainNodeClientLayer { #[async_trait::async_trait] impl WiringLayer for MainNodeClientLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "main_node_client_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { let main_node_client = Client::http(self.url) .context("failed creating JSON-RPC client for main node")? 
.for_network(self.l2_chain_id.into()) @@ -44,14 +61,16 @@ impl WiringLayer for MainNodeClientLayer { .build(); let client = Box::new(main_node_client) as Box>; - context.insert_resource(MainNodeClientResource(client.clone()))?; // Insert healthcheck - let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; - app_health - .insert_custom_component(Arc::new(MainNodeHealthCheck::from(client))) + input + .app_health + .0 + .insert_custom_component(Arc::new(MainNodeHealthCheck::from(client.clone()))) .map_err(WiringError::internal)?; - Ok(()) + Ok(Output { + main_node_client: client.into(), + }) } } diff --git a/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs b/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs index 11bfab18a4c6..848dd4464387 100644 --- a/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs +++ b/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs @@ -6,31 +6,52 @@ use crate::{ implementations::resources::{ fee_input::FeeInputResource, main_node_client::MainNodeClientResource, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; +/// Wiring layer for main node fee params fetcher -- a fee input resource used on +/// the external node. #[derive(Debug)] pub struct MainNodeFeeParamsFetcherLayer; +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub main_node_client: MainNodeClientResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub fee_input: FeeInputResource, + #[context(task)] + pub fetcher: MainNodeFeeParamsFetcherTask, +} + #[async_trait::async_trait] impl WiringLayer for MainNodeFeeParamsFetcherLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "main_node_fee_params_fetcher_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let MainNodeClientResource(main_node_client) = context.get_resource().await?; + async fn wire(self, input: Self::Input) -> Result { + let MainNodeClientResource(main_node_client) = input.main_node_client; let fetcher = Arc::new(MainNodeFeeParamsFetcher::new(main_node_client)); - context.insert_resource(FeeInputResource(fetcher.clone()))?; - context.add_task(Box::new(MainNodeFeeParamsFetcherTask { fetcher })); - Ok(()) + Ok(Output { + fee_input: fetcher.clone().into(), + fetcher: MainNodeFeeParamsFetcherTask { fetcher }, + }) } } #[derive(Debug)] -struct MainNodeFeeParamsFetcherTask { +pub struct MainNodeFeeParamsFetcherTask { fetcher: Arc, } diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index bc1244410bf2..827ec69d9427 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -18,20 +18,13 @@ use crate::{ pools::{MasterPool, PoolResource, ReplicaPool}, web3_api::TreeApiClientResource, }, - service::{ServiceContext, StopReceiver}, + service::{ShutdownHook, StopReceiver}, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; -/// Builder for a metadata calculator. -/// -/// ## Effects -/// -/// - Resolves `PoolResource`. -/// - Resolves `PoolResource`. 
-/// - Resolves `ObjectStoreResource` (optional). -/// - Adds `tree_health_check` to the `ResourceCollection`. -/// - Adds `metadata_calculator` to the node. +/// Wiring layer for Metadata calculator and Tree API. #[derive(Debug)] pub struct MetadataCalculatorLayer { config: MetadataCalculatorConfig, @@ -39,6 +32,32 @@ pub struct MetadataCalculatorLayer { pruning_config: Option, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub replica_pool: PoolResource, + /// Only needed for `MerkleTreeMode::Full` + pub object_store: Option, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub metadata_calculator: MetadataCalculator, + pub tree_api_client: TreeApiClientResource, + /// Only provided if configuration is provided. + #[context(task)] + pub tree_api_task: Option, + /// Only provided if configuration is provided. + #[context(task)] + pub pruning_task: Option, + pub rocksdb_shutdown_hook: ShutdownHook, +} + impl MetadataCalculatorLayer { pub fn new(config: MetadataCalculatorConfig) -> Self { Self { @@ -61,25 +80,28 @@ impl MetadataCalculatorLayer { #[async_trait::async_trait] impl WiringLayer for MetadataCalculatorLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "metadata_calculator_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool = context.get_resource::>().await?; - let main_pool = pool.get().await?; + async fn wire(self, input: Self::Input) -> Result { + let main_pool = input.master_pool.get().await?; // The number of connections in a recovery pool is based on the mainnet recovery runs. It doesn't need // to be particularly accurate at this point, since the main node isn't expected to recover from a snapshot. - let recovery_pool = context - .get_resource::>() - .await? - .get_custom(10) - .await?; + let recovery_pool = input.replica_pool.get_custom(10).await?; + let app_health = input.app_health.0; let object_store = match self.config.mode { MerkleTreeMode::Lightweight => None, MerkleTreeMode::Full => { - let store = context.get_resource::().await?; + let store = input.object_store.ok_or_else(|| { + WiringError::Configuration( + "Object store is required for full Merkle tree mode".into(), + ) + })?; Some(store) } }; @@ -92,60 +114,59 @@ impl WiringLayer for MetadataCalculatorLayer { .await? 
         .with_recovery_pool(recovery_pool);
 
-        let AppHealthCheckResource(app_health) = context.get_resource_or_default().await;
         app_health
             .insert_custom_component(Arc::new(metadata_calculator.tree_health_check()))
             .map_err(WiringError::internal)?;
 
-        if let Some(tree_api_config) = self.tree_api_config {
+        let tree_api_task = self.tree_api_config.map(|tree_api_config| {
             let bind_addr = (Ipv4Addr::UNSPECIFIED, tree_api_config.port).into();
             let tree_reader = metadata_calculator.tree_reader();
-            context.add_task(Box::new(TreeApiTask {
+            TreeApiTask {
                 bind_addr,
                 tree_reader,
-            }));
-        }
-
-        if let Some(pruning_removal_delay) = self.pruning_config {
-            let pruning_task = Box::new(metadata_calculator.pruning_task(pruning_removal_delay));
-            app_health
-                .insert_component(pruning_task.health_check())
-                .map_err(|err| WiringError::Internal(err.into()))?;
-            context.add_task(pruning_task);
-        }
-
-        context.insert_resource(TreeApiClientResource(Arc::new(
-            metadata_calculator.tree_reader(),
-        )))?;
+            }
+        });
 
-        let metadata_calculator_task = Box::new(MetadataCalculatorTask {
-            metadata_calculator,
+        let pruning_task = self
+            .pruning_config
+            .map(
+                |pruning_removal_delay| -> Result<MerkleTreePruningTask, WiringError> {
+                    let pruning_task = metadata_calculator.pruning_task(pruning_removal_delay);
+                    app_health
+                        .insert_component(pruning_task.health_check())
+                        .map_err(|err| WiringError::Internal(err.into()))?;
+                    Ok(pruning_task)
+                },
+            )
+            .transpose()?;
+
+        let tree_api_client = TreeApiClientResource(Arc::new(metadata_calculator.tree_reader()));
+
+        let rocksdb_shutdown_hook = ShutdownHook::new("rocksdb_termination", async {
+            // Wait for all the instances of RocksDB to be destroyed.
+            tokio::task::spawn_blocking(RocksDB::await_rocksdb_termination)
+                .await
+                .context("failed terminating RocksDB instances")
         });
-        context.add_task(metadata_calculator_task);
 
-        Ok(())
+        Ok(Output {
+            metadata_calculator,
+            tree_api_client,
+            tree_api_task,
+            pruning_task,
+            rocksdb_shutdown_hook,
+        })
     }
 }
 
-#[derive(Debug)]
-pub struct MetadataCalculatorTask {
-    metadata_calculator: MetadataCalculator,
-}
-
 #[async_trait::async_trait]
-impl Task for MetadataCalculatorTask {
+impl Task for MetadataCalculator {
     fn id(&self) -> TaskId {
         "metadata_calculator".into()
     }
 
     async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
-        let result = self.metadata_calculator.run(stop_receiver.0).await;
-
-        // Wait for all the instances of RocksDB to be destroyed.
-        tokio::task::spawn_blocking(RocksDB::await_rocksdb_termination)
-            .await
-            .context("failed terminating RocksDB instances")?;
-        result
+        (*self).run(stop_receiver.0).await
     }
 }
 
@@ -161,13 +182,16 @@ impl Task for TreeApiTask {
         "tree_api".into()
     }
 
-    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
-        self.tree_reader
-            .wait()
-            .await
-            .context("Cannot initialize tree reader")?
-            .run_api_server(self.bind_addr, stop_receiver.0)
-            .await
+    async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        if let Some(reader) = self.tree_reader.wait().await {
+            reader.run_api_server(self.bind_addr, stop_receiver.0).await
+        } else {
+            // The tree is dropped before it is initialized, e.g. because the node is getting shut down.
+            // We don't want to treat this as an error, since it could mask the real shutdown cause in logs etc.
+ tracing::warn!("Tree is dropped before initialized, not starting the tree API server"); + stop_receiver.0.changed().await?; + Ok(()) + } } } diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 8637f15459d5..55bc0a40ca73 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -1,9 +1,12 @@ +pub mod base_token; pub mod batch_status_updater; +pub mod block_reverter; pub mod circuit_breaker_checker; pub mod commitment_generator; pub mod consensus; pub mod consistency_checker; pub mod contract_verification_api; +pub mod da_dispatcher; pub mod eth_sender; pub mod eth_watch; pub mod healtcheck_server; @@ -13,6 +16,7 @@ pub mod l1_gas; pub mod main_node_client; pub mod main_node_fee_params_fetcher; pub mod metadata_calculator; +pub mod node_storage_init; pub mod object_store; pub mod pk_signing_eth_client; pub mod pools_layer; @@ -21,8 +25,7 @@ pub mod prometheus_exporter; pub mod proof_data_handler; pub mod pruning; pub mod query_eth_client; -pub mod reorg_detector_checker; -pub mod reorg_detector_runner; +pub mod reorg_detector; pub mod sigint; pub mod state_keeper; pub mod sync_state_updater; diff --git a/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs b/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs new file mode 100644 index 000000000000..0b98d0e2b556 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs @@ -0,0 +1,100 @@ +use std::sync::Arc; + +// Re-export to initialize the layer without having to depend on the crate directly. +pub use zksync_node_storage_init::SnapshotRecoveryConfig; +use zksync_node_storage_init::{ + external_node::{ExternalNodeGenesis, ExternalNodeReverter, ExternalNodeSnapshotRecovery}, + InitializeStorage, NodeInitializationStrategy, RevertStorage, +}; +use zksync_types::L2ChainId; + +use super::NodeInitializationStrategyResource; +use crate::{ + implementations::resources::{ + healthcheck::AppHealthCheckResource, + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + reverter::BlockReverterResource, + }, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// Wiring layer for external node initialization strategy. 
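The strategy layers and the `node_storage_init` module below also show how the dedicated `Precondition` and `UnconstrainedTask` traits are retired: a task now just overrides `Task::kind()`, with `TaskKind::Precondition`, `TaskKind::UnconstrainedTask`, and `TaskKind::UnconstrainedOneshotTask` as the variants visible in this diff. A sketch of a precondition (the check itself is hypothetical):

use crate::{
    service::StopReceiver,
    task::{Task, TaskId, TaskKind},
};

#[derive(Debug)]
pub struct ExamplePrecondition;

#[async_trait::async_trait]
impl Task for ExamplePrecondition {
    // Overriding `kind()` is all that distinguishes a precondition from a
    // regular task now; regular tasks simply keep the default.
    fn kind(&self) -> TaskKind {
        TaskKind::Precondition
    }

    fn id(&self) -> TaskId {
        "example_precondition".into()
    }

    async fn run(self: Box<Self>, _stop_receiver: StopReceiver) -> anyhow::Result<()> {
        // A real check would go here; returning `Ok(())` unblocks the tasks
        // gated on this precondition.
        Ok(())
    }
}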
+#[derive(Debug)] +pub struct ExternalNodeInitStrategyLayer { + pub l2_chain_id: L2ChainId, + pub snapshot_recovery_config: Option, +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub main_node_client: MainNodeClientResource, + pub block_reverter: Option, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub strategy: NodeInitializationStrategyResource, +} + +#[async_trait::async_trait] +impl WiringLayer for ExternalNodeInitStrategyLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "external_node_role_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get().await?; + let MainNodeClientResource(client) = input.main_node_client; + let AppHealthCheckResource(app_health) = input.app_health; + let block_reverter = match input.block_reverter { + Some(reverter) => { + // If reverter was provided, we intend to be its sole consumer. + // We don't want multiple components to attempt reverting blocks. + let reverter = reverter.0.take().ok_or(WiringError::Configuration( + "BlockReverterResource is taken".into(), + ))?; + Some(reverter) + } + None => None, + }; + + let genesis = Arc::new(ExternalNodeGenesis { + l2_chain_id: self.l2_chain_id, + client: client.clone(), + pool: pool.clone(), + }); + let snapshot_recovery = self.snapshot_recovery_config.map(|recovery_config| { + Arc::new(ExternalNodeSnapshotRecovery { + client: client.clone(), + pool: pool.clone(), + recovery_config, + app_health, + }) as Arc + }); + // We always want to detect reorgs, even if we can't roll them back. + let block_reverter = Some(Arc::new(ExternalNodeReverter { + client, + pool: pool.clone(), + reverter: block_reverter, + }) as Arc); + let strategy = NodeInitializationStrategy { + genesis, + snapshot_recovery, + block_reverter, + }; + + Ok(Output { + strategy: strategy.into(), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/node_storage_init/main_node_strategy.rs b/core/node/node_framework/src/implementations/layers/node_storage_init/main_node_strategy.rs new file mode 100644 index 000000000000..ef43aaf1aee0 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/node_storage_init/main_node_strategy.rs @@ -0,0 +1,64 @@ +use std::sync::Arc; + +use zksync_config::{ContractsConfig, GenesisConfig}; +use zksync_node_storage_init::{main_node::MainNodeGenesis, NodeInitializationStrategy}; + +use super::NodeInitializationStrategyResource; +use crate::{ + implementations::resources::{ + eth_interface::EthInterfaceResource, + pools::{MasterPool, PoolResource}, + }, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// Wiring layer for main node initialization strategy. 
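Both strategy layers hand their result to the rest of the node as a resource rather than a task; `NodeInitializationStrategyResource`, defined at the bottom of this module's `mod.rs`, shows the full recipe. Declaring such a resource takes a `Resource` impl plus an optional `From` conversion for ergonomics, as in this sketch with a hypothetical payload type:

use crate::resource::Resource;

// Hypothetical payload; the real resource wraps `NodeInitializationStrategy`.
#[derive(Debug, Clone)]
pub struct ExampleStrategyResource(pub u64);

impl Resource for ExampleStrategyResource {
    fn name() -> String {
        "example_strategy".into()
    }
}

impl From<u64> for ExampleStrategyResource {
    fn from(inner: u64) -> Self {
        Self(inner)
    }
}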
+#[derive(Debug)]
+pub struct MainNodeInitStrategyLayer {
+    pub genesis: GenesisConfig,
+    pub contracts: ContractsConfig,
+}
+
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub master_pool: PoolResource<MasterPool>,
+    pub eth_interface: EthInterfaceResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    pub strategy: NodeInitializationStrategyResource,
+}
+
+#[async_trait::async_trait]
+impl WiringLayer for MainNodeInitStrategyLayer {
+    type Input = Input;
+    type Output = Output;
+
+    fn layer_name(&self) -> &'static str {
+        "main_node_role_layer"
+    }
+
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let pool = input.master_pool.get().await?;
+        let EthInterfaceResource(l1_client) = input.eth_interface;
+        let genesis = Arc::new(MainNodeGenesis {
+            contracts: self.contracts,
+            genesis: self.genesis,
+            l1_client,
+            pool,
+        });
+        let strategy = NodeInitializationStrategy {
+            genesis,
+            snapshot_recovery: None,
+            block_reverter: None,
+        };
+
+        Ok(Output {
+            strategy: strategy.into(),
+        })
+    }
+}
diff --git a/core/node/node_framework/src/implementations/layers/node_storage_init/mod.rs b/core/node/node_framework/src/implementations/layers/node_storage_init/mod.rs
new file mode 100644
index 000000000000..5fed50e0f53d
--- /dev/null
+++ b/core/node/node_framework/src/implementations/layers/node_storage_init/mod.rs
@@ -0,0 +1,160 @@
+use zksync_node_storage_init::{NodeInitializationStrategy, NodeStorageInitializer};
+
+use crate::{
+    implementations::resources::pools::{MasterPool, PoolResource},
+    resource::Resource,
+    service::StopReceiver,
+    task::{Task, TaskId, TaskKind},
+    wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
+};
+
+pub mod external_node_strategy;
+pub mod main_node_strategy;
+
+/// Wiring layer for `NodeStorageInitializer`.
+///
+/// ## Requests resources
+///
+/// - `PoolResource<MasterPool>`
+/// - `NodeInitializationStrategyResource`
+///
+/// ## Adds tasks
+///
+/// Depending on the mode, either `NodeStorageInitializer` or `NodeStorageInitializerPrecondition`.
+#[derive(Debug, Default)]
+pub struct NodeStorageInitializerLayer {
+    as_precondition: bool,
+}
+
+impl NodeStorageInitializerLayer {
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Changes the wiring logic to treat the initializer as a precondition.
+ pub fn as_precondition(mut self) -> Self { + self.as_precondition = true; + self + } +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub strategy: NodeInitializationStrategyResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub initializer: Option, + #[context(task)] + pub precondition: Option, +} + +impl Output { + fn initializer(initializer: NodeStorageInitializer) -> Self { + Self { + initializer: Some(initializer), + precondition: None, + } + } + + fn precondition(precondition: NodeStorageInitializer) -> Self { + Self { + initializer: None, + precondition: Some(NodeStorageInitializerPrecondition(precondition)), + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for NodeStorageInitializerLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + if self.as_precondition { + return "node_storage_initializer_precondition_layer"; + } + "node_storage_initializer_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get().await?; + let NodeInitializationStrategyResource(strategy) = input.strategy; + + let initializer = NodeStorageInitializer::new(strategy, pool); + + // Insert either task or precondition. + let output = if self.as_precondition { + Output::precondition(initializer) + } else { + Output::initializer(initializer) + }; + + Ok(output) + } +} + +#[async_trait::async_trait] +impl Task for NodeStorageInitializer { + fn kind(&self) -> TaskKind { + TaskKind::UnconstrainedOneshotTask + } + + fn id(&self) -> TaskId { + "node_storage_initializer".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + tracing::info!("Starting the node storage initialization task"); + (*self).run(stop_receiver.0).await?; + tracing::info!("Node storage initialization task completed"); + Ok(()) + } +} + +/// Runs [`NodeStorageInitializer`] as a precondition, blocking +/// tasks from starting until the storage is initialized. +#[derive(Debug)] +pub struct NodeStorageInitializerPrecondition(NodeStorageInitializer); + +#[async_trait::async_trait] +impl Task for NodeStorageInitializerPrecondition { + fn kind(&self) -> TaskKind { + TaskKind::Precondition + } + + fn id(&self) -> TaskId { + "node_storage_initializer_precondition".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + tracing::info!("Waiting for node storage to be initialized"); + let result = self.0.wait_for_initialized_storage(stop_receiver.0).await; + tracing::info!("Node storage initialization precondition completed"); + result + } +} + +// Note: unlike with other modules, this one keeps within the same file to simplify +// moving the implementations out of the framework soon. +/// Resource representing the node initialization strategy. 
+#[derive(Debug, Clone)] +pub struct NodeInitializationStrategyResource(NodeInitializationStrategy); + +impl Resource for NodeInitializationStrategyResource { + fn name() -> String { + "node_initialization_strategy".into() + } +} + +impl From for NodeInitializationStrategyResource { + fn from(strategy: NodeInitializationStrategy) -> Self { + Self(strategy) + } +} diff --git a/core/node/node_framework/src/implementations/layers/object_store.rs b/core/node/node_framework/src/implementations/layers/object_store.rs index e5a4b19c6b56..55840caf1f9c 100644 --- a/core/node/node_framework/src/implementations/layers/object_store.rs +++ b/core/node/node_framework/src/implementations/layers/object_store.rs @@ -3,10 +3,10 @@ use zksync_object_store::ObjectStoreFactory; use crate::{ implementations::resources::object_store::ObjectStoreResource, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, }; +/// Wiring layer for object store. #[derive(Debug)] pub struct ObjectStoreLayer { config: ObjectStoreConfig, @@ -20,13 +20,16 @@ impl ObjectStoreLayer { #[async_trait::async_trait] impl WiringLayer for ObjectStoreLayer { + type Input = (); + type Output = ObjectStoreResource; + fn layer_name(&self) -> &'static str { "object_store_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { let object_store = ObjectStoreFactory::new(self.config).create_store().await?; - context.insert_resource(ObjectStoreResource(object_store))?; - Ok(()) + let resource = ObjectStoreResource(object_store); + Ok(resource) } } diff --git a/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs b/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs index cc93498e0f2b..de570105a471 100644 --- a/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs +++ b/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs @@ -10,10 +10,11 @@ use crate::{ implementations::resources::eth_interface::{ BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource, EthInterfaceResource, }, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; +/// Wiring layer for [`PKSigningClient`]. #[derive(Debug)] pub struct PKSigningEthClientLayer { eth_sender_config: EthConfig, @@ -22,6 +23,20 @@ pub struct PKSigningEthClientLayer { wallets: wallets::EthSender, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub eth_client: EthInterfaceResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub signing_client: BoundEthInterfaceResource, + /// Only provided if the blob operator key is provided to the layer. 
+    pub signing_client_for_blobs: Option<BoundEthInterfaceForBlobsResource>,
+}
+
 impl PKSigningEthClientLayer {
     pub fn new(
         eth_sender_config: EthConfig,
@@ -40,18 +55,21 @@ impl PKSigningEthClientLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for PKSigningEthClientLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "pk_signing_eth_client_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
         let private_key = self.wallets.operator.private_key();
         let gas_adjuster_config = self
             .eth_sender_config
             .gas_adjuster
             .as_ref()
             .context("gas_adjuster config is missing")?;
-        let EthInterfaceResource(query_client) = context.get_resource().await?;
+        let EthInterfaceResource(query_client) = input.eth_client;
 
         let signing_client = PKSigningClient::new_raw(
             private_key.clone(),
@@ -60,9 +78,9 @@ impl WiringLayer for PKSigningEthClientLayer {
             self.l1_chain_id,
             query_client.clone(),
         );
-        context.insert_resource(BoundEthInterfaceResource(Box::new(signing_client)))?;
+        let signing_client = BoundEthInterfaceResource(Box::new(signing_client));
 
-        if let Some(blob_operator) = &self.wallets.blob_operator {
+        let signing_client_for_blobs = self.wallets.blob_operator.map(|blob_operator| {
             let private_key = blob_operator.private_key();
             let signing_client_for_blobs = PKSigningClient::new_raw(
                 private_key.clone(),
@@ -71,11 +89,12 @@ impl WiringLayer for PKSigningEthClientLayer {
                 self.l1_chain_id,
                 query_client,
             );
-            context.insert_resource(BoundEthInterfaceForBlobsResource(Box::new(
-                signing_client_for_blobs,
-            )))?;
-        }
+            BoundEthInterfaceForBlobsResource(Box::new(signing_client_for_blobs))
+        });
 
-        Ok(())
+        Ok(Output {
+            signing_client,
+            signing_client_for_blobs,
+        })
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/pools_layer.rs b/core/node/node_framework/src/implementations/layers/pools_layer.rs
index cf26ad4d9326..734f6f0ccf69 100644
--- a/core/node/node_framework/src/implementations/layers/pools_layer.rs
+++ b/core/node/node_framework/src/implementations/layers/pools_layer.rs
@@ -1,18 +1,13 @@
-use std::sync::Arc;
-
 use zksync_config::configs::{DatabaseSecrets, PostgresConfig};
 use zksync_dal::{ConnectionPool, Core};
-use zksync_db_connection::healthcheck::ConnectionPoolHealthCheck;
 
 use crate::{
-    implementations::resources::{
-        healthcheck::AppHealthCheckResource,
-        pools::{MasterPool, PoolResource, ProverPool, ReplicaPool},
-    },
-    service::ServiceContext,
+    implementations::resources::pools::{MasterPool, PoolResource, ProverPool, ReplicaPool},
     wiring_layer::{WiringError, WiringLayer},
+    IntoContext,
 };
 
+/// Builder for the [`PoolsLayer`].
 #[derive(Debug)]
 pub struct PoolsLayerBuilder {
     config: PostgresConfig,
@@ -23,6 +18,8 @@ pub struct PoolsLayerBuilder {
 }
 
 impl PoolsLayerBuilder {
+    /// Creates a new builder with the provided configuration and secrets.
+    /// By default, no pools are enabled.
     pub fn empty(config: PostgresConfig, database_secrets: DatabaseSecrets) -> Self {
         Self {
             config,
@@ -33,21 +30,25 @@ impl PoolsLayerBuilder {
         }
     }
 
+    /// Allows enabling the master pool.
     pub fn with_master(mut self, with_master: bool) -> Self {
         self.with_master = with_master;
         self
     }
 
+    /// Allows enabling the replica pool.
     pub fn with_replica(mut self, with_replica: bool) -> Self {
         self.with_replica = with_replica;
         self
     }
 
+    /// Allows enabling the prover pool.
     pub fn with_prover(mut self, with_prover: bool) -> Self {
         self.with_prover = with_prover;
         self
     }
 
+    /// Builds the [`PoolsLayer`] with the provided configuration.
     pub fn build(self) -> PoolsLayer {
         PoolsLayer {
             config: self.config,
@@ -59,6 +60,14 @@ impl PoolsLayerBuilder {
     }
 }
 
+/// Wiring layer for connection pools.
+/// During wiring, also prepares the global configuration for the connection pools.
+///
+/// ## Adds resources
+///
+/// - `PoolResource::<MasterPool>` (if master pool is enabled)
+/// - `PoolResource::<ReplicaPool>` (if replica pool is enabled)
+/// - `PoolResource::<ProverPool>` (if prover pool is enabled)
 #[derive(Debug)]
 pub struct PoolsLayer {
     config: PostgresConfig,
@@ -68,13 +77,24 @@ pub struct PoolsLayer {
     with_prover: bool,
 }
 
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    pub master_pool: Option<PoolResource<MasterPool>>,
+    pub replica_pool: Option<PoolResource<ReplicaPool>>,
+    pub prover_pool: Option<PoolResource<ProverPool>>,
+}
+
 #[async_trait::async_trait]
 impl WiringLayer for PoolsLayer {
+    type Input = ();
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "pools_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
+    async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
         if !self.with_master && !self.with_replica && !self.with_prover {
             return Err(WiringError::Configuration(
                 "At least one pool should be enabled".to_string(),
@@ -90,58 +110,48 @@ impl WiringLayer for PoolsLayer {
             }
         }
 
-        if self.with_master {
+        let master_pool = if self.with_master {
             let pool_size = self.config.max_connections()?;
             let pool_size_master = self.config.max_connections_master().unwrap_or(pool_size);
-            context.insert_resource(PoolResource::<MasterPool>::new(
+            Some(PoolResource::<MasterPool>::new(
                 self.secrets.master_url()?,
                 pool_size_master,
                 None,
                 None,
-            ))?;
-        }
+            ))
+        } else {
+            None
+        };
 
-        if self.with_replica {
+        let replica_pool = if self.with_replica {
             // We're most interested in setting acquire / statement timeouts for the API server, which puts the most load
             // on Postgres.
-            context.insert_resource(PoolResource::<ReplicaPool>::new(
+            Some(PoolResource::<ReplicaPool>::new(
                 self.secrets.replica_url()?,
                 self.config.max_connections()?,
                 self.config.statement_timeout(),
                 self.config.acquire_timeout(),
-            ))?;
-        }
+            ))
+        } else {
+            None
+        };
 
-        if self.with_prover {
-            context.insert_resource(PoolResource::<ProverPool>::new(
+        let prover_pool = if self.with_prover {
+            Some(PoolResource::<ProverPool>::new(
                 self.secrets.prover_url()?,
                 self.config.max_connections()?,
                 None,
                 None,
-            ))?;
-        }
-
-        // Insert health checks for the core pool.
-        let connection_pool = if self.with_replica {
-            context
-                .get_resource::<PoolResource<ReplicaPool>>()
-                .await?
-                .get()
-                .await?
+            ))
         } else {
-            context
-                .get_resource::<PoolResource<MasterPool>>()
-                .await?
-                .get()
-                .await?
+            None
         };
 
-        let db_health_check = ConnectionPoolHealthCheck::new(connection_pool);
-        let AppHealthCheckResource(app_health) = context.get_resource_or_default().await;
-        app_health
-            .insert_custom_component(Arc::new(db_health_check))
-            .map_err(WiringError::internal)?;
-        Ok(())
+        Ok(Output {
+            master_pool,
+            replica_pool,
+            prover_pool,
+        })
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/postgres_metrics.rs b/core/node/node_framework/src/implementations/layers/postgres_metrics.rs
index 09d81844dd5a..238bee578678 100644
--- a/core/node/node_framework/src/implementations/layers/postgres_metrics.rs
+++ b/core/node/node_framework/src/implementations/layers/postgres_metrics.rs
@@ -4,46 +4,64 @@ use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core};
 
 use crate::{
     implementations::resources::pools::{PoolResource, ReplicaPool},
-    service::{ServiceContext, StopReceiver},
-    task::{TaskId, UnconstrainedTask},
+    service::StopReceiver,
+    task::{Task, TaskId, TaskKind},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
 const SCRAPE_INTERVAL: Duration = Duration::from_secs(60);
 
+/// Wiring layer for the Postgres metrics exporter.
 #[derive(Debug)]
 pub struct PostgresMetricsLayer;
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub replica_pool: PoolResource<ReplicaPool>,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub task: PostgresMetricsScrapingTask,
+}
+
 #[async_trait::async_trait]
 impl WiringLayer for PostgresMetricsLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "postgres_metrics_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        let replica_pool_resource = context.get_resource::<PoolResource<ReplicaPool>>().await?;
-        let pool_for_metrics = replica_pool_resource.get_singleton().await?;
-        context.add_unconstrained_task(Box::new(PostgresMetricsScrapingTask { pool_for_metrics }));
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let pool_for_metrics = input.replica_pool.get_singleton().await?;
+        let task = PostgresMetricsScrapingTask { pool_for_metrics };
 
-        Ok(())
+        Ok(Output { task })
     }
 }
 
 #[derive(Debug)]
-struct PostgresMetricsScrapingTask {
+pub struct PostgresMetricsScrapingTask {
     pool_for_metrics: ConnectionPool<Core>,
 }
 
 #[async_trait::async_trait]
-impl UnconstrainedTask for PostgresMetricsScrapingTask {
+impl Task for PostgresMetricsScrapingTask {
+    fn kind(&self) -> TaskKind {
+        TaskKind::UnconstrainedTask
+    }
+
     fn id(&self) -> TaskId {
         "postgres_metrics_scraping".into()
     }
 
-    async fn run_unconstrained(
-        self: Box<Self>,
-        mut stop_receiver: StopReceiver,
-    ) -> anyhow::Result<()> {
+    async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
         tokio::select! {
             () = PostgresMetrics::run_scraping(self.pool_for_metrics, SCRAPE_INTERVAL) => {
                 tracing::warn!("Postgres metrics scraping unexpectedly stopped");
diff --git a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs
index 4b7451348235..8ce53c8bfdb2 100644
--- a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs
+++ b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs
@@ -1,19 +1,15 @@
-use prometheus_exporter::PrometheusExporterConfig;
 use zksync_health_check::{HealthStatus, HealthUpdater, ReactiveHealthCheck};
+use zksync_vlog::prometheus::PrometheusExporterConfig;
 
 use crate::{
     implementations::resources::healthcheck::AppHealthCheckResource,
-    service::{ServiceContext, StopReceiver},
-    task::{TaskId, UnconstrainedTask},
+    service::StopReceiver,
+    task::{Task, TaskId, TaskKind},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
-/// Builder for a prometheus exporter.
-///
-/// ## Effects
-///
-/// - Adds prometheus health check to the `ResourceCollection`.
-/// - Adds `prometheus_exporter` to the node.
+/// Wiring layer for Prometheus exporter server.
 #[derive(Debug)]
 pub struct PrometheusExporterLayer(pub PrometheusExporterConfig);
 
@@ -23,38 +19,59 @@ pub struct PrometheusExporterTask {
     prometheus_health_updater: HealthUpdater,
 }
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    #[context(default)]
+    pub app_health: AppHealthCheckResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub task: PrometheusExporterTask,
+}
+
 #[async_trait::async_trait]
 impl WiringLayer for PrometheusExporterLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "prometheus_exporter"
     }
 
-    async fn wire(self: Box<Self>, mut node: ServiceContext<'_>) -> Result<(), WiringError> {
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
         let (prometheus_health_check, prometheus_health_updater) =
             ReactiveHealthCheck::new("prometheus_exporter");
 
-        let AppHealthCheckResource(app_health) = node.get_resource_or_default().await;
-        app_health
+        input
+            .app_health
+            .0
             .insert_component(prometheus_health_check)
             .map_err(WiringError::internal)?;
 
-        let task = Box::new(PrometheusExporterTask {
+        let task = PrometheusExporterTask {
             config: self.0,
             prometheus_health_updater,
-        });
+        };
 
-        node.add_unconstrained_task(task);
-        Ok(())
+        Ok(Output { task })
     }
 }
 
 #[async_trait::async_trait]
-impl UnconstrainedTask for PrometheusExporterTask {
+impl Task for PrometheusExporterTask {
+    fn kind(&self) -> TaskKind {
+        TaskKind::UnconstrainedTask
+    }
+
     fn id(&self) -> TaskId {
         "prometheus_exporter".into()
     }
 
-    async fn run_unconstrained(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
         let prometheus_task = self.config.run(stop_receiver.0);
         self.prometheus_health_updater
             .update(HealthStatus::Ready.into());
diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs
index 7952ca6a585f..bcb3cedc6e7e 100644
--- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs
+++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs
@@ -10,24 +10,33 @@ use crate::{
         object_store::ObjectStoreResource,
         pools::{MasterPool, PoolResource},
     },
-    service::{ServiceContext, StopReceiver},
+    service::StopReceiver,
     task::{Task, TaskId},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
-/// Builder for a proof data handler.
-///
-/// ## Effects
-///
-/// - Resolves `PoolResource<MasterPool>`.
-/// - Resolves `ObjectStoreResource`.
-/// - Adds `proof_data_handler` to the node.
+/// Wiring layer for proof data handler server.
 #[derive(Debug)]
 pub struct ProofDataHandlerLayer {
     proof_data_handler_config: ProofDataHandlerConfig,
     commitment_mode: L1BatchCommitmentMode,
 }
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub master_pool: PoolResource<MasterPool>,
+    pub object_store: ObjectStoreResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub task: ProofDataHandlerTask,
+}
+
 impl ProofDataHandlerLayer {
     pub fn new(
         proof_data_handler_config: ProofDataHandlerConfig,
@@ -42,29 +51,30 @@ impl ProofDataHandlerLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for ProofDataHandlerLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "proof_data_handler_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        let pool_resource = context.get_resource::<PoolResource<MasterPool>>().await?;
-        let main_pool = pool_resource.get().await.unwrap();
-
-        let object_store = context.get_resource::<ObjectStoreResource>().await?;
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let main_pool = input.master_pool.get().await.unwrap();
+        let blob_store = input.object_store.0;
 
-        context.add_task(Box::new(ProofDataHandlerTask {
+        let task = ProofDataHandlerTask {
             proof_data_handler_config: self.proof_data_handler_config,
-            blob_store: object_store.0,
+            blob_store,
             main_pool,
             commitment_mode: self.commitment_mode,
-        }));
+        };
 
-        Ok(())
+        Ok(Output { task })
     }
 }
 
 #[derive(Debug)]
-struct ProofDataHandlerTask {
+pub struct ProofDataHandlerTask {
     proof_data_handler_config: ProofDataHandlerConfig,
     blob_store: Arc<dyn ObjectStore>,
     main_pool: ConnectionPool<Core>,
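The pruning hunk below pulls the shared health-check collection through a `#[context(default)]` field, the same idiom used by the reorg detector and tree data fetcher later in this diff. A condensed sketch of the idiom, with everything beyond the `Input` struct left to the surrounding hunks (type paths match the diff; the layer itself is hypothetical):

```rust
use crate::{
    implementations::resources::{
        healthcheck::AppHealthCheckResource,
        pools::{MasterPool, PoolResource},
    },
    FromContext,
};

#[derive(Debug, FromContext)]
#[context(crate = crate)]
pub struct Input {
    pub master_pool: PoolResource<MasterPool>,
    // `default`: reuse the shared `AppHealthCheck` if some earlier layer
    // already provided it, otherwise create a default instance and share it.
    #[context(default)]
    pub app_health: AppHealthCheckResource,
}

// Inside `wire()`, registration then follows the pattern of the hunks below:
//
//     input
//         .app_health
//         .0
//         .insert_component(component.health_check())
//         .map_err(WiringError::internal)?;
```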
diff --git a/core/node/node_framework/src/implementations/layers/pruning.rs b/core/node/node_framework/src/implementations/layers/pruning.rs
index 3ad52606083b..216e214026b1 100644
--- a/core/node/node_framework/src/implementations/layers/pruning.rs
+++ b/core/node/node_framework/src/implementations/layers/pruning.rs
@@ -7,11 +7,13 @@ use crate::{
         healthcheck::AppHealthCheckResource,
         pools::{MasterPool, PoolResource},
     },
-    service::{ServiceContext, StopReceiver},
+    service::StopReceiver,
     task::{Task, TaskId},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
+/// Wiring layer for node pruning.
 #[derive(Debug)]
 pub struct PruningLayer {
     pruning_removal_delay: Duration,
@@ -19,6 +21,21 @@ pub struct PruningLayer {
     minimum_l1_batch_age: Duration,
 }
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub master_pool: PoolResource<MasterPool>,
+    #[context(default)]
+    pub app_health: AppHealthCheckResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub db_pruner: DbPruner,
+}
+
 impl PruningLayer {
     pub fn new(
         pruning_removal_delay: Duration,
@@ -35,13 +52,15 @@ impl PruningLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for PruningLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "pruning_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        let pool_resource = context.get_resource::<PoolResource<MasterPool>>().await?;
-        let main_pool = pool_resource.get().await?;
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let main_pool = input.master_pool.get().await?;
 
         let db_pruner = DbPruner::new(
             DbPrunerConfig {
@@ -52,14 +71,12 @@ impl WiringLayer for PruningLayer {
             main_pool,
         );
 
-        let AppHealthCheckResource(app_health) = context.get_resource_or_default().await;
-        app_health
+        input
+            .app_health
+            .0
             .insert_component(db_pruner.health_check())
             .map_err(WiringError::internal)?;
-
-        context.add_task(Box::new(db_pruner));
-
-        Ok(())
+        Ok(Output { db_pruner })
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/query_eth_client.rs b/core/node/node_framework/src/implementations/layers/query_eth_client.rs
index 0e4be369db48..d48312d7d5b5 100644
--- a/core/node/node_framework/src/implementations/layers/query_eth_client.rs
+++ b/core/node/node_framework/src/implementations/layers/query_eth_client.rs
@@ -4,10 +4,10 @@ use zksync_web3_decl::client::Client;
 
 use crate::{
     implementations::resources::eth_interface::EthInterfaceResource,
-    service::ServiceContext,
     wiring_layer::{WiringError, WiringLayer},
 };
 
+/// Wiring layer for Ethereum client.
 #[derive(Debug)]
 pub struct QueryEthClientLayer {
     chain_id: L1ChainId,
@@ -22,16 +22,18 @@ impl QueryEthClientLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for QueryEthClientLayer {
+    type Input = ();
+    type Output = EthInterfaceResource;
+
     fn layer_name(&self) -> &'static str {
         "query_eth_client_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
+    async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
         let query_client = Client::http(self.web3_url.clone())
             .context("Client::new()")?
             .for_network(self.chain_id.into())
             .build();
-        context.insert_resource(EthInterfaceResource(Box::new(query_client)))?;
-        Ok(())
+        Ok(EthInterfaceResource(Box::new(query_client)))
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/reorg_detector.rs b/core/node/node_framework/src/implementations/layers/reorg_detector.rs
new file mode 100644
index 000000000000..0d4cf8dd5220
--- /dev/null
+++ b/core/node/node_framework/src/implementations/layers/reorg_detector.rs
@@ -0,0 +1,72 @@
+use zksync_reorg_detector::{self, ReorgDetector};
+
+use crate::{
+    implementations::resources::{
+        healthcheck::AppHealthCheckResource,
+        main_node_client::MainNodeClientResource,
+        pools::{MasterPool, PoolResource},
+    },
+    service::StopReceiver,
+    task::{Task, TaskId},
+    wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
+};
+
+/// Wiring layer for [`ReorgDetector`] checker.
+/// This layer is responsible for detecting reorgs and shutting down the node if one is detected.
+///
+/// This layer assumes that the node starts with the initialized state.
+#[derive(Debug)]
+pub struct ReorgDetectorLayer;
+
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub main_node_client: MainNodeClientResource,
+    pub master_pool: PoolResource<MasterPool>,
+    #[context(default)]
+    pub app_health: AppHealthCheckResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub reorg_detector: ReorgDetector,
+}
+
+#[async_trait::async_trait]
+impl WiringLayer for ReorgDetectorLayer {
+    type Input = Input;
+    type Output = Output;
+
+    fn layer_name(&self) -> &'static str {
+        "reorg_detector_layer"
+    }
+
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let MainNodeClientResource(main_node_client) = input.main_node_client;
+        let pool = input.master_pool.get().await?;
+
+        let reorg_detector = ReorgDetector::new(main_node_client, pool);
+
+        let AppHealthCheckResource(app_health) = input.app_health;
+        app_health
+            .insert_component(reorg_detector.health_check().clone())
+            .map_err(WiringError::internal)?;
+
+        Ok(Output { reorg_detector })
+    }
+}
+
+#[async_trait::async_trait]
+impl Task for ReorgDetector {
+    fn id(&self) -> TaskId {
+        "reorg_detector".into()
+    }
+
+    async fn run(mut self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        (*self).run(stop_receiver.0).await?;
+        Ok(())
+    }
+}
diff --git a/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs b/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs
deleted file mode 100644
index eee63e6763b1..000000000000
--- a/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs
+++ /dev/null
@@ -1,88 +0,0 @@
-use std::time::Duration;
-
-use anyhow::Context;
-use zksync_dal::{ConnectionPool, Core};
-use zksync_reorg_detector::{self, ReorgDetector};
-
-use crate::{
-    implementations::resources::{
-        main_node_client::MainNodeClientResource,
-        pools::{MasterPool, PoolResource},
-    },
-    precondition::Precondition,
-    service::{ServiceContext, StopReceiver},
-    task::TaskId,
-    wiring_layer::{WiringError, WiringLayer},
-};
-
-const REORG_DETECTED_SLEEP_INTERVAL: Duration = Duration::from_secs(1);
-
-/// The layer is responsible for integrating reorg checking into the system.
-/// When a reorg is detected, the system will not start running until it is fixed.
-#[derive(Debug)]
-pub struct ReorgDetectorCheckerLayer;
-
-#[async_trait::async_trait]
-impl WiringLayer for ReorgDetectorCheckerLayer {
-    fn layer_name(&self) -> &'static str {
-        "reorg_detector_checker_layer"
-    }
-
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        // Get resources.
-        let main_node_client = context.get_resource::<MainNodeClientResource>().await?.0;
-
-        let pool_resource = context.get_resource::<PoolResource<MasterPool>>().await?;
-        let pool = pool_resource.get().await?;
-
-        // Create and insert precondition.
-        context.add_precondition(Box::new(CheckerPrecondition {
-            pool: pool.clone(),
-            reorg_detector: ReorgDetector::new(main_node_client, pool),
-        }));
-
-        Ok(())
-    }
-}
-
-pub struct CheckerPrecondition {
-    pool: ConnectionPool<Core>,
-    reorg_detector: ReorgDetector,
-}
-
-#[async_trait::async_trait]
-impl Precondition for CheckerPrecondition {
-    fn id(&self) -> TaskId {
-        "reorg_detector_checker".into()
-    }
-
-    async fn check(mut self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
-        // Given that this is a precondition -- i.e. something that starts before some invariants are met,
-        // we need to first ensure that there is at least one batch in the database (there may be none if
-        // either genesis or snapshot recovery has not been performed yet).
-        let earliest_batch = zksync_dal::helpers::wait_for_l1_batch(
-            &self.pool,
-            REORG_DETECTED_SLEEP_INTERVAL,
-            &mut stop_receiver.0,
-        )
-        .await?;
-        if earliest_batch.is_none() {
-            // Stop signal received.
-            return Ok(());
-        }
-
-        loop {
-            match self.reorg_detector.run_once(stop_receiver.0.clone()).await {
-                Ok(()) => return Ok(()),
-                Err(zksync_reorg_detector::Error::ReorgDetected(last_correct_l1_batch)) => {
-                    tracing::warn!(
-                        "Reorg detected, last correct L1 batch #{}. Waiting till it will be resolved. Sleep for {} seconds and retry",
-                        last_correct_l1_batch, REORG_DETECTED_SLEEP_INTERVAL.as_secs()
-                    );
-                    tokio::time::sleep(REORG_DETECTED_SLEEP_INTERVAL).await;
-                }
-                Err(err) => return Err(err).context("reorg_detector.check_consistency()"),
-            }
-        }
-    }
-}
diff --git a/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs b/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs
deleted file mode 100644
index 55ee621c15b0..000000000000
--- a/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs
+++ /dev/null
@@ -1,73 +0,0 @@
-use std::sync::Arc;
-
-use anyhow::Context;
-use zksync_block_reverter::BlockReverter;
-use zksync_reorg_detector::{self, ReorgDetector};
-
-use crate::{
-    implementations::resources::{
-        main_node_client::MainNodeClientResource,
-        pools::{MasterPool, PoolResource},
-        reverter::BlockReverterResource,
-    },
-    service::{ServiceContext, StopReceiver},
-    task::{TaskId, UnconstrainedOneshotTask},
-    wiring_layer::{WiringError, WiringLayer},
-};
-
-/// Layer responsible for detecting reorg and reverting blocks in case it was found.
-#[derive(Debug)]
-pub struct ReorgDetectorRunnerLayer;
-
-#[async_trait::async_trait]
-impl WiringLayer for ReorgDetectorRunnerLayer {
-    fn layer_name(&self) -> &'static str {
-        "reorg_detector_runner_layer"
-    }
-
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        // Get resources.
-        let main_node_client = context.get_resource::<MainNodeClientResource>().await?.0;
-
-        let pool_resource = context.get_resource::<PoolResource<MasterPool>>().await?;
-        let pool = pool_resource.get().await?;
-
-        let reverter = context.get_resource::<BlockReverterResource>().await?.0;
-
-        // Create and insert task.
-        context.add_unconstrained_oneshot_task(Box::new(RunnerUnconstrainedOneshotTask {
-            reorg_detector: ReorgDetector::new(main_node_client, pool),
-            reverter,
-        }));
-
-        Ok(())
-    }
-}
-
-pub struct RunnerUnconstrainedOneshotTask {
-    reorg_detector: ReorgDetector,
-    reverter: Arc<BlockReverter>,
-}
-
-#[async_trait::async_trait]
-impl UnconstrainedOneshotTask for RunnerUnconstrainedOneshotTask {
-    fn id(&self) -> TaskId {
-        "reorg_detector_runner".into()
-    }
-
-    async fn run_unconstrained_oneshot(
-        mut self: Box<Self>,
-        stop_receiver: StopReceiver,
-    ) -> anyhow::Result<()> {
-        match self.reorg_detector.run_once(stop_receiver.0.clone()).await {
-            Ok(()) => {}
-            Err(zksync_reorg_detector::Error::ReorgDetected(last_correct_l1_batch)) => {
-                tracing::info!("Reverting to l1 batch number {last_correct_l1_batch}");
-                self.reverter.roll_back(last_correct_l1_batch).await?;
-                tracing::info!("Revert successfully completed");
-            }
-            Err(err) => return Err(err).context("reorg_detector.check_consistency()"),
-        }
-        Ok(())
-    }
-}
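Both files deleted above existed mainly to carry the special-purpose `Precondition` and `UnconstrainedOneshotTask` traits. After this change there is a single `Task` trait, and the scheduling class is reported by a method; a sketch of the migration for a one-shot check (the task body is illustrative, and only the kinds actually used in this diff are named):

```rust
use crate::{
    service::StopReceiver,
    task::{Task, TaskId, TaskKind},
};

#[derive(Debug)]
pub struct OneTimeCheck;

#[async_trait::async_trait]
impl Task for OneTimeCheck {
    // Formerly expressed by implementing a dedicated trait; the kinds visible
    // in this diff are `Precondition`, `OneshotTask`, and `UnconstrainedTask`.
    fn kind(&self) -> TaskKind {
        TaskKind::Precondition
    }

    fn id(&self) -> TaskId {
        "one_time_check".into()
    }

    async fn run(self: Box<Self>, _stop_receiver: StopReceiver) -> anyhow::Result<()> {
        // Run the check once; returning `Ok` lets dependent tasks proceed.
        Ok(())
    }
}
```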
diff --git a/core/node/node_framework/src/implementations/layers/sigint.rs b/core/node/node_framework/src/implementations/layers/sigint.rs
index 2d11f1525370..014bfdbdde14 100644
--- a/core/node/node_framework/src/implementations/layers/sigint.rs
+++ b/core/node/node_framework/src/implementations/layers/sigint.rs
@@ -1,42 +1,55 @@
 use tokio::sync::oneshot;
 
 use crate::{
-    service::{ServiceContext, StopReceiver},
-    task::{TaskId, UnconstrainedTask},
+    service::StopReceiver,
+    task::{Task, TaskId, TaskKind},
     wiring_layer::{WiringError, WiringLayer},
+    IntoContext,
 };
 
-/// Layer that changes the handling of SIGINT signal, preventing an immediate shutdown.
+/// Wiring layer that changes the handling of SIGINT signal, preventing an immediate shutdown.
 /// Instead, it would propagate the signal to the rest of the node, allowing it to shut down gracefully.
 #[derive(Debug)]
 pub struct SigintHandlerLayer;
 
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub task: SigintHandlerTask,
+}
+
 #[async_trait::async_trait]
 impl WiringLayer for SigintHandlerLayer {
+    type Input = ();
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "sigint_handler_layer"
    }
 
-    async fn wire(self: Box<Self>, mut node: ServiceContext<'_>) -> Result<(), WiringError> {
-        // SIGINT may happen at any time, so we must handle it as soon as it happens.
-        node.add_unconstrained_task(Box::new(SigintHandlerTask));
-        Ok(())
+    async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
+        Ok(Output {
+            task: SigintHandlerTask,
+        })
     }
 }
 
 #[derive(Debug)]
-struct SigintHandlerTask;
+pub struct SigintHandlerTask;
 
 #[async_trait::async_trait]
-impl UnconstrainedTask for SigintHandlerTask {
+impl Task for SigintHandlerTask {
+    fn kind(&self) -> TaskKind {
+        // SIGINT may happen at any time, so we must handle it as soon as it happens.
+        TaskKind::UnconstrainedTask
+    }
+
     fn id(&self) -> TaskId {
         "sigint_handler".into()
    }
 
-    async fn run_unconstrained(
-        self: Box<Self>,
-        mut stop_receiver: StopReceiver,
-    ) -> anyhow::Result<()> {
+    async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
         let (sigint_sender, sigint_receiver) = oneshot::channel();
         let mut sigint_sender = Some(sigint_sender); // Has to be done this way since `set_handler` requires `FnMut`.
         ctrlc::set_handler(move || {
@@ -51,7 +64,9 @@ impl UnconstrainedTask for SigintHandlerTask {
 
         // Wait for either SIGINT or stop signal.
         tokio::select! {
-            _ = sigint_receiver => {},
+            _ = sigint_receiver => {
+                tracing::info!("Received SIGINT signal");
+            },
             _ = stop_receiver.0.changed() => {},
         };
diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs
index 1ec80fef4272..31b76550767c 100644
--- a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs
+++ b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs
@@ -13,16 +13,32 @@ use crate::{
         state_keeper::{ConditionalSealerResource, StateKeeperIOResource},
         sync_state::SyncStateResource,
     },
-    resource::Unique,
-    service::ServiceContext,
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
+/// Wiring layer for `ExternalIO`, an IO part of state keeper used by the external node.
 #[derive(Debug)]
 pub struct ExternalIOLayer {
     chain_id: L2ChainId,
 }
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub pool: PoolResource<MasterPool>,
+    pub main_node_client: MainNodeClientResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    pub sync_state: SyncStateResource,
+    pub action_queue_sender: ActionQueueSenderResource,
+    pub io: StateKeeperIOResource,
+    pub sealer: ConditionalSealerResource,
+}
+
 impl ExternalIOLayer {
     pub fn new(chain_id: L2ChainId) -> Self {
         Self { chain_id }
@@ -31,38 +47,38 @@ impl ExternalIOLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for ExternalIOLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "external_io_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        // Fetch required resources.
-        let master_pool = context.get_resource::<PoolResource<MasterPool>>().await?;
-        let MainNodeClientResource(main_node_client) = context.get_resource().await?;
-
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
         // Create `SyncState` resource.
         let sync_state = SyncState::default();
-        context.insert_resource(SyncStateResource(sync_state))?;
 
         // Create `ActionQueueSender` resource.
         let (action_queue_sender, action_queue) = ActionQueue::new();
-        context.insert_resource(ActionQueueSenderResource(Unique::new(action_queue_sender)))?;
 
         // Create external IO resource.
-        let io_pool = master_pool.get().await.context("Get master pool")?;
+        let io_pool = input.pool.get().await.context("Get master pool")?;
         let io = ExternalIO::new(
             io_pool,
             action_queue,
-            Box::new(main_node_client.for_component("external_io")),
+            Box::new(input.main_node_client.0.for_component("external_io")),
             self.chain_id,
         )
-        .await
         .context("Failed initializing I/O for external node state keeper")?;
 
         // Create sealer.
-        context.insert_resource(ConditionalSealerResource(Arc::new(NoopSealer)))?;
+        let sealer = ConditionalSealerResource(Arc::new(NoopSealer));
 
-        Ok(())
+        Ok(Output {
+            sync_state: sync_state.into(),
+            action_queue_sender: action_queue_sender.into(),
+            io: io.into(),
+            sealer,
+        })
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs
index 82e6e52274aa..33d3b5676aac 100644
--- a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs
+++ b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs
@@ -2,17 +2,23 @@ use zksync_state_keeper::MainBatchExecutor;
 
 use crate::{
     implementations::resources::state_keeper::BatchExecutorResource,
-    resource::Unique,
-    service::ServiceContext,
     wiring_layer::{WiringError, WiringLayer},
+    IntoContext,
 };
 
+/// Wiring layer for `MainBatchExecutor`, part of the state keeper responsible for running the VM.
 #[derive(Debug)]
 pub struct MainBatchExecutorLayer {
     save_call_traces: bool,
     optional_bytecode_compression: bool,
 }
 
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    pub batch_executor: BatchExecutorResource,
+}
+
 impl MainBatchExecutorLayer {
     pub fn new(save_call_traces: bool, optional_bytecode_compression: bool) -> Self {
         Self {
@@ -24,15 +30,19 @@ impl MainBatchExecutorLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for MainBatchExecutorLayer {
+    type Input = ();
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "main_batch_executor_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
+    async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
         let builder =
             MainBatchExecutor::new(self.save_call_traces, self.optional_bytecode_compression);
 
-        context.insert_resource(BatchExecutorResource(Unique::new(Box::new(builder))))?;
-        Ok(())
+        Ok(Output {
+            batch_executor: builder.into(),
+        })
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs
index 1a913fd990bf..6be6544ee3df 100644
--- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs
+++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use anyhow::Context as _;
 use zksync_config::configs::{
     chain::{MempoolConfig, StateKeeperConfig},
@@ -14,12 +12,27 @@ use crate::{
         pools::{MasterPool, PoolResource},
         state_keeper::{ConditionalSealerResource, StateKeeperIOResource},
     },
-    resource::Unique,
-    service::{ServiceContext, StopReceiver},
+    service::StopReceiver,
     task::{Task, TaskId},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
+/// Wiring layer for `MempoolIO`, an IO part of state keeper used by the main node.
+///
+/// ## Requests resources
+///
+/// - `FeeInputResource`
+/// - `PoolResource<MasterPool>`
+///
+/// ## Adds resources
+///
+/// - `StateKeeperIOResource`
+/// - `ConditionalSealerResource`
+///
+/// ## Adds tasks
+///
+/// - `MempoolFetcher`
 #[derive(Debug)]
 pub struct MempoolIOLayer {
     zksync_network_id: L2ChainId,
@@ -28,6 +41,22 @@ pub struct MempoolIOLayer {
     wallets: wallets::StateKeeper,
 }
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub fee_input: FeeInputResource,
+    pub master_pool: PoolResource<MasterPool>,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    pub state_keeper_io: StateKeeperIOResource,
+    pub conditional_sealer: ConditionalSealerResource,
+    #[context(task)]
+    pub mempool_fetcher: MempoolFetcher,
+}
+
 impl MempoolIOLayer {
     pub fn new(
         zksync_network_id: L2ChainId,
@@ -63,14 +92,16 @@ impl MempoolIOLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for MempoolIOLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "mempool_io_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        // Fetch required resources.
-        let batch_fee_input_provider = context.get_resource::<FeeInputResource>().await?.0;
-        let master_pool = context.get_resource::<PoolResource<MasterPool>>().await?;
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let batch_fee_input_provider = input.fee_input.0;
+        let master_pool = input.master_pool;
 
         // Create mempool fetcher task.
         let mempool_guard = self.build_mempool_guard(&master_pool).await?;
@@ -84,7 +115,6 @@ impl WiringLayer for MempoolIOLayer {
             &self.mempool_config,
             mempool_fetcher_pool,
         );
-        context.add_task(Box::new(MempoolFetcherTask(mempool_fetcher)));
 
         // Create mempool IO resource.
         let mempool_db_pool = master_pool
@@ -99,28 +129,26 @@ impl WiringLayer for MempoolIOLayer {
             self.wallets.fee_account.address(),
             self.mempool_config.delay_interval(),
             self.zksync_network_id,
-        )
-        .await?;
-        context.insert_resource(StateKeeperIOResource(Unique::new(Box::new(io))))?;
+        )?;
 
         // Create sealer.
         let sealer = SequencerSealer::new(self.state_keeper_config);
-        context.insert_resource(ConditionalSealerResource(Arc::new(sealer)))?;
 
-        Ok(())
+        Ok(Output {
+            state_keeper_io: io.into(),
+            conditional_sealer: sealer.into(),
+            mempool_fetcher,
+        })
     }
 }
 
-#[derive(Debug)]
-struct MempoolFetcherTask(MempoolFetcher);
-
 #[async_trait::async_trait]
-impl Task for MempoolFetcherTask {
+impl Task for MempoolFetcher {
     fn id(&self) -> TaskId {
         "state_keeper/mempool_fetcher".into()
     }
 
     async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
-        self.0.run(stop_receiver.0).await
+        (*self).run(stop_receiver.0).await
     }
 }
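A pattern repeated from here on (for `MempoolFetcher` above, and for `AsyncCatchupTask`, `L2BlockSealerTask`, and the VM runner types below) is implementing `Task` directly on the library type instead of keeping a newtype wrapper. A sketch of the shape, with `Fetcher` standing in for such a hypothetical library type:

```rust
use tokio::sync::watch;

use crate::{
    service::StopReceiver,
    task::{Task, TaskId},
};

// Hypothetical long-running component exposed by a library crate.
#[derive(Debug)]
pub struct Fetcher;

impl Fetcher {
    pub async fn run(self, mut stop: watch::Receiver<bool>) -> anyhow::Result<()> {
        // Work until the stop signal flips.
        stop.changed().await.ok();
        Ok(())
    }
}

#[async_trait::async_trait]
impl Task for Fetcher {
    fn id(&self) -> TaskId {
        "fetcher".into()
    }

    // `StopReceiver` is a thin wrapper over `watch::Receiver<bool>`, hence `.0`.
    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
        (*self).run(stop_receiver.0).await
    }
}
```

The trade-off is that the framework crate must own the `impl`, but the `MempoolFetcherTask`-style wrappers and their forwarding boilerplate disappear.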
diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs
index 97364f6388cd..b0dfe0f1600c 100644
--- a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs
+++ b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs
@@ -24,22 +24,39 @@ use crate::{
             StateKeeperIOResource,
         },
     },
-    service::{ServiceContext, StopReceiver},
-    task::{Task, TaskId},
+    service::{ShutdownHook, StopReceiver},
+    task::{Task, TaskId, TaskKind},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
-/// Requests:
-/// - `StateKeeperIOResource`
-/// - `BatchExecutorResource`
-/// - `ConditionalSealerResource`
-///
+/// Wiring layer for the state keeper.
 #[derive(Debug)]
 pub struct StateKeeperLayer {
     state_keeper_db_path: String,
     rocksdb_options: RocksdbStorageOptions,
 }
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub state_keeper_io: StateKeeperIOResource,
+    pub batch_executor: BatchExecutorResource,
+    pub output_handler: OutputHandlerResource,
+    pub conditional_sealer: ConditionalSealerResource,
+    pub master_pool: PoolResource<MasterPool>,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub state_keeper: StateKeeperTask,
+    #[context(task)]
+    pub rocksdb_catchup: AsyncCatchupTask,
+    pub rocksdb_termination_hook: ShutdownHook,
+}
+
 impl StateKeeperLayer {
     pub fn new(state_keeper_db_path: String, rocksdb_options: RocksdbStorageOptions) -> Self {
         Self {
@@ -51,52 +68,62 @@ impl StateKeeperLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for StateKeeperLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "state_keeper_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        let io = context
-            .get_resource::<StateKeeperIOResource>()
-            .await?
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let io = input
+            .state_keeper_io
             .0
             .take()
             .context("StateKeeperIO was provided but taken by some other task")?;
-        let batch_executor_base = context
-            .get_resource::<BatchExecutorResource>()
-            .await?
+        let batch_executor_base = input
+            .batch_executor
             .0
             .take()
             .context("L1BatchExecutorBuilder was provided but taken by some other task")?;
-        let output_handler = context
-            .get_resource::<OutputHandlerResource>()
-            .await?
+        let output_handler = input
+            .output_handler
             .0
             .take()
             .context("HandleStateKeeperOutput was provided but taken by another task")?;
-        let sealer = context.get_resource::<ConditionalSealerResource>().await?.0;
-        let master_pool = context.get_resource::<PoolResource<MasterPool>>().await?;
+        let sealer = input.conditional_sealer.0;
+        let master_pool = input.master_pool;
 
-        let (storage_factory, task) = AsyncRocksdbCache::new(
+        let (storage_factory, rocksdb_catchup) = AsyncRocksdbCache::new(
             master_pool.get_custom(2).await?,
             self.state_keeper_db_path,
             self.rocksdb_options,
         );
-        context.add_task(Box::new(RocksdbCatchupTask(task)));
 
-        context.add_task(Box::new(StateKeeperTask {
+        let state_keeper = StateKeeperTask {
             io,
             batch_executor_base,
             output_handler,
             sealer,
             storage_factory: Arc::new(storage_factory),
-        }));
-        Ok(())
+        };
+
+        let rocksdb_termination_hook = ShutdownHook::new("rocksdb_termination", async {
+            // Wait for all the instances of RocksDB to be destroyed.
+            tokio::task::spawn_blocking(RocksDB::await_rocksdb_termination)
+                .await
+                .context("failed terminating RocksDB instances")
+        });
+        Ok(Output {
+            state_keeper,
+            rocksdb_catchup,
+            rocksdb_termination_hook,
+        })
     }
 }
 
 #[derive(Debug)]
-struct StateKeeperTask {
+pub struct StateKeeperTask {
     io: Box<dyn StateKeeperIO>,
     batch_executor_base: Box<dyn BatchExecutor>,
     output_handler: OutputHandler,
@@ -119,29 +146,21 @@ impl Task for StateKeeperTask {
             self.sealer,
             self.storage_factory,
         );
-        let result = state_keeper.run().await;
-
-        // Wait for all the instances of RocksDB to be destroyed.
-        tokio::task::spawn_blocking(RocksDB::await_rocksdb_termination)
-            .await
-            .unwrap();
-
-        result
+        state_keeper.run().await
     }
 }
 
-#[derive(Debug)]
-struct RocksdbCatchupTask(AsyncCatchupTask);
-
 #[async_trait::async_trait]
-impl Task for RocksdbCatchupTask {
+impl Task for AsyncCatchupTask {
+    fn kind(&self) -> TaskKind {
+        TaskKind::OneshotTask
+    }
+
     fn id(&self) -> TaskId {
         "state_keeper/rocksdb_catchup_task".into()
     }
 
-    async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
-        self.0.run(stop_receiver.0.clone()).await?;
-        stop_receiver.0.changed().await?;
-        Ok(())
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        (*self).run(stop_receiver.0).await
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs
index d0e94f637e08..f639d72fe40a 100644
--- a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs
+++ b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs
@@ -1,6 +1,7 @@
 use anyhow::Context as _;
+use zksync_node_framework_derive::FromContext;
 use zksync_state_keeper::{
-    io::seal_logic::l2_block_seal_subtasks::L2BlockSealProcess, OutputHandler,
+    io::seal_logic::l2_block_seal_subtasks::L2BlockSealProcess, L2BlockSealerTask, OutputHandler,
     StateKeeperPersistence, TreeWritesPersistence,
 };
 use zksync_types::Address;
@@ -12,11 +13,26 @@ use crate::{
         sync_state::SyncStateResource,
     },
     resource::Unique,
-    service::{ServiceContext, StopReceiver},
+    service::StopReceiver,
     task::{Task, TaskId},
     wiring_layer::{WiringError, WiringLayer},
+    IntoContext,
 };
 
+/// Wiring layer for the state keeper output handler.
+///
+/// ## Requests resources
+///
+/// - `PoolResource<MasterPool>`
+/// - `SyncStateResource` (optional)
+///
+/// ## Adds resources
+///
+/// - `OutputHandlerResource`
+///
+/// ## Adds tasks
+///
+/// - `L2BlockSealerTask`
 #[derive(Debug)]
 pub struct OutputHandlerLayer {
     l2_shared_bridge_addr: Address,
@@ -31,6 +47,21 @@ pub struct OutputHandlerLayer {
     protective_reads_persistence_enabled: bool,
 }
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub master_pool: PoolResource<MasterPool>,
+    pub sync_state: Option<SyncStateResource>,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    pub output_handler: OutputHandlerResource,
+    #[context(task)]
+    pub l2_block_sealer: L2BlockSealerTask,
+}
+
 impl OutputHandlerLayer {
     pub fn new(l2_shared_bridge_addr: Address, l2_block_seal_queue_capacity: usize) -> Self {
         Self {
@@ -57,23 +88,18 @@ impl OutputHandlerLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for OutputHandlerLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "state_keeper_output_handler_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        // Fetch required resources.
-        let master_pool = context.get_resource::<PoolResource<MasterPool>>().await?;
-        // Use `SyncState` if provided.
-        let sync_state = match context.get_resource::<SyncStateResource>().await {
-            Ok(sync_state) => Some(sync_state.0),
-            Err(WiringError::ResourceLacking { .. }) => None,
-            Err(err) => return Err(err),
-        };
-
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
         // Create L2 block sealer task and output handler.
         // L2 Block sealing process is parallelized, so we have to provide enough pooled connections.
-        let persistence_pool = master_pool
+        let persistence_pool = input
+            .master_pool
             .get_custom(L2BlockSealProcess::subtasks_len())
             .await
             .context("Get master pool")?;
@@ -87,7 +113,7 @@ impl WiringLayer for OutputHandlerLayer {
         }
         if !self.protective_reads_persistence_enabled {
             // **Important:** Disabling protective reads persistence is only sound if the node will never
-            // run a full Merkle tree.
+            // run a full Merkle tree OR an accompanying protective-reads-writer is being run.
             tracing::warn!("Disabling persisting protective reads; this should be safe, but is considered an experimental option at the moment");
             persistence = persistence.without_protective_reads();
         }
@@ -95,19 +121,18 @@ impl WiringLayer for OutputHandlerLayer {
         let tree_writes_persistence = TreeWritesPersistence::new(persistence_pool);
         let mut output_handler = OutputHandler::new(Box::new(persistence))
             .with_handler(Box::new(tree_writes_persistence));
-        if let Some(sync_state) = sync_state {
-            output_handler = output_handler.with_handler(Box::new(sync_state));
+        if let Some(sync_state) = input.sync_state {
+            output_handler = output_handler.with_handler(Box::new(sync_state.0));
         }
-        context.insert_resource(OutputHandlerResource(Unique::new(output_handler)))?;
-        context.add_task(Box::new(L2BlockSealerTask(l2_block_sealer)));
+        let output_handler = OutputHandlerResource(Unique::new(output_handler));
 
-        Ok(())
+        Ok(Output {
+            output_handler,
+            l2_block_sealer,
+        })
     }
 }
 
-#[derive(Debug)]
-struct L2BlockSealerTask(zksync_state_keeper::L2BlockSealerTask);
-
 #[async_trait::async_trait]
 impl Task for L2BlockSealerTask {
     fn id(&self) -> TaskId {
@@ -116,6 +141,6 @@ impl Task for L2BlockSealerTask {
 
     async fn run(self: Box<Self>, _stop_receiver: StopReceiver) -> anyhow::Result<()> {
         // Miniblock sealer will exit itself once sender is dropped.
-        self.0.run().await
+        (*self).run().await
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs
index fcbe51f581e1..1f86b43f7a5b 100644
--- a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs
+++ b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs
@@ -8,53 +8,74 @@ use crate::{
         pools::{MasterPool, PoolResource},
         sync_state::SyncStateResource,
     },
-    service::{ServiceContext, StopReceiver},
+    service::StopReceiver,
     task::{Task, TaskId},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
-/// Runs the dynamic sync state updater for `SyncState` if no `SyncState` was provided before.
-/// This layer may be used as a fallback for EN API if API server runs without the core component.
+/// Wiring layer for [`SyncState`] maintenance.
+/// If [`SyncStateResource`] is already provided by another layer, this layer does nothing.
 #[derive(Debug)]
 pub struct SyncStateUpdaterLayer;
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    /// Fetched to check whether the `SyncState` was already provided by another layer.
+    pub sync_state: Option<SyncStateResource>,
+    pub master_pool: PoolResource<MasterPool>,
+    pub main_node_client: MainNodeClientResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    pub sync_state: Option<SyncStateResource>,
+    #[context(task)]
+    pub sync_state_updater: Option<SyncStateUpdater>,
+}
+
 #[async_trait::async_trait]
 impl WiringLayer for SyncStateUpdaterLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "sync_state_updater_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        if context.get_resource::<SyncStateResource>().await.is_ok() {
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        if input.sync_state.is_some() {
             // `SyncState` was provided by some other layer -- we assume that the layer that added this resource
             // will be responsible for its maintenance.
             tracing::info!(
                 "SyncState was provided by another layer, skipping SyncStateUpdaterLayer"
             );
-            return Ok(());
+            return Ok(Output {
+                sync_state: None,
+                sync_state_updater: None,
+            });
         }
 
-        let pool = context.get_resource::<PoolResource<MasterPool>>().await?;
-        let MainNodeClientResource(main_node_client) = context.get_resource().await?;
+        let connection_pool = input.master_pool.get().await?;
+        let MainNodeClientResource(main_node_client) = input.main_node_client;
         let sync_state = SyncState::default();
 
-        // Insert resource.
-        context.insert_resource(SyncStateResource(sync_state.clone()))?;
-
-        // Insert task
-        context.add_task(Box::new(SyncStateUpdater {
-            sync_state,
-            connection_pool: pool.get().await?,
-            main_node_client,
-        }));
-
-        Ok(())
+        Ok(Output {
+            sync_state: Some(sync_state.clone().into()),
+            sync_state_updater: Some(SyncStateUpdater {
+                sync_state,
+                connection_pool,
+                main_node_client,
+            }),
+        })
     }
 }
 
 #[derive(Debug)]
-struct SyncStateUpdater {
+pub struct SyncStateUpdater {
     sync_state: SyncState,
     connection_pool: ConnectionPool<Core>,
     main_node_client: Box<DynClient<L2>>,
diff --git a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs
index 76ae0b26971f..68789082a226 100644
--- a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs
+++ b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs
@@ -7,11 +7,13 @@ use crate::{
         object_store::ObjectStoreResource,
         pools::{MasterPool, PoolResource},
     },
-    service::{ServiceContext, StopReceiver},
+    service::StopReceiver,
     task::{Task, TaskId},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
+/// Wiring layer for [`TeeVerifierInputProducer`].
 #[derive(Debug)]
 pub struct TeeVerifierInputProducerLayer {
     l2_chain_id: L2ChainId,
@@ -23,40 +25,45 @@ impl TeeVerifierInputProducerLayer {
     }
 }
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub master_pool: PoolResource<MasterPool>,
+    pub object_store: ObjectStoreResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub task: TeeVerifierInputProducer,
+}
+
 #[async_trait::async_trait]
 impl WiringLayer for TeeVerifierInputProducerLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "tee_verifier_input_producer_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        // Get resources.
-        let pool_resource = context
-            .get_resource::<PoolResource<MasterPool>>()
-            .await?
-            .get()
-            .await?;
-        let object_store = context.get_resource::<ObjectStoreResource>().await?;
-        let tee =
-            TeeVerifierInputProducer::new(pool_resource, object_store.0, self.l2_chain_id).await?;
-
-        context.add_task(Box::new(TeeVerifierInputProducerTask { tee }));
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let pool = input.master_pool.get().await?;
+        let ObjectStoreResource(object_store) = input.object_store;
+        let task = TeeVerifierInputProducer::new(pool, object_store, self.l2_chain_id).await?;
 
-        Ok(())
+        Ok(Output { task })
     }
 }
 
-pub struct TeeVerifierInputProducerTask {
-    tee: TeeVerifierInputProducer,
-}
-
 #[async_trait::async_trait]
-impl Task for TeeVerifierInputProducerTask {
+impl Task for TeeVerifierInputProducer {
     fn id(&self) -> TaskId {
         "tee_verifier_input_producer".into()
     }
 
     async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
-        self.tee.run(stop_receiver.0, None).await
+        (*self).run(stop_receiver.0, None).await
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs b/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs
index c45071ce418b..ca2e80142401 100644
--- a/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs
+++ b/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs
@@ -8,16 +8,35 @@ use crate::{
         main_node_client::MainNodeClientResource,
         pools::{MasterPool, PoolResource},
     },
-    service::{ServiceContext, StopReceiver},
+    service::StopReceiver,
     task::{Task, TaskId},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
+/// Wiring layer for [`TreeDataFetcher`].
 #[derive(Debug)]
 pub struct TreeDataFetcherLayer {
     diamond_proxy_addr: Address,
 }
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub master_pool: PoolResource<MasterPool>,
+    pub main_node_client: MainNodeClientResource,
+    pub eth_client: EthInterfaceResource,
+    #[context(default)]
+    pub app_health: AppHealthCheckResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub task: TreeDataFetcher,
+}
+
 impl TreeDataFetcherLayer {
     pub fn new(diamond_proxy_addr: Address) -> Self {
         Self { diamond_proxy_addr }
@@ -26,32 +45,33 @@ impl TreeDataFetcherLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for TreeDataFetcherLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "tree_data_fetcher_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        let pool = context.get_resource::<PoolResource<MasterPool>>().await?;
-        let MainNodeClientResource(client) = context.get_resource().await?;
-        let EthInterfaceResource(eth_client) = context.get_resource().await?;
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let pool = input.master_pool.get().await?;
+        let MainNodeClientResource(client) = input.main_node_client;
+        let EthInterfaceResource(eth_client) = input.eth_client;
 
         tracing::warn!(
             "Running tree data fetcher (allows a node to operate w/o a Merkle tree or w/o waiting the tree to catch up). \
              This is an experimental feature; do not use unless you know what you're doing"
         );
-        let fetcher = TreeDataFetcher::new(client, pool.get().await?)
-            .with_l1_data(eth_client, self.diamond_proxy_addr)?;
+        let task =
+            TreeDataFetcher::new(client, pool).with_l1_data(eth_client, self.diamond_proxy_addr)?;
 
         // Insert healthcheck
-        let AppHealthCheckResource(app_health) = context.get_resource_or_default().await;
-        app_health
-            .insert_component(fetcher.health_check())
+        input
+            .app_health
+            .0
+            .insert_component(task.health_check())
             .map_err(WiringError::internal)?;
 
-        // Insert task
-        context.add_task(Box::new(fetcher));
-
-        Ok(())
+        Ok(Output { task })
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/validate_chain_ids.rs b/core/node/node_framework/src/implementations/layers/validate_chain_ids.rs
index 0f04a35d484a..1e23bdfbd622 100644
--- a/core/node/node_framework/src/implementations/layers/validate_chain_ids.rs
+++ b/core/node/node_framework/src/implementations/layers/validate_chain_ids.rs
@@ -5,18 +5,43 @@ use crate::{
     implementations::resources::{
         eth_interface::EthInterfaceResource, main_node_client::MainNodeClientResource,
     },
-    precondition::Precondition,
-    service::{ServiceContext, StopReceiver},
-    task::TaskId,
+    service::StopReceiver,
+    task::{Task, TaskId, TaskKind},
     wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
 };
 
+/// Wiring layer for chain ID validation precondition for external node.
+/// Ensures that chain IDs are consistent locally, on main node, and on L1.
+///
+/// ## Requests resources
+///
+/// - `EthInterfaceResource`
+/// - `MainNodeClientResource`
+///
+/// ## Adds preconditions
+///
+/// - `ValidateChainIdsTask`
 #[derive(Debug)]
 pub struct ValidateChainIdsLayer {
     l1_chain_id: L1ChainId,
     l2_chain_id: L2ChainId,
 }
 
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub eth_client: EthInterfaceResource,
+    pub main_node_client: MainNodeClientResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub task: ValidateChainIdsTask,
+}
+
 impl ValidateChainIdsLayer {
     pub fn new(l1_chain_id: L1ChainId, l2_chain_id: L2ChainId) -> Self {
         Self {
@@ -28,13 +53,16 @@ impl ValidateChainIdsLayer {
 
 #[async_trait::async_trait]
 impl WiringLayer for ValidateChainIdsLayer {
+    type Input = Input;
+    type Output = Output;
+
     fn layer_name(&self) -> &'static str {
         "validate_chain_ids_layer"
     }
 
-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        let EthInterfaceResource(query_client) = context.get_resource().await?;
-        let MainNodeClientResource(main_node_client) = context.get_resource().await?;
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let EthInterfaceResource(query_client) = input.eth_client;
+        let MainNodeClientResource(main_node_client) = input.main_node_client;
 
         let task = ValidateChainIdsTask::new(
             self.l1_chain_id,
@@ -43,19 +71,21 @@ impl WiringLayer for ValidateChainIdsLayer {
             main_node_client,
         );
 
-        context.add_precondition(Box::new(task));
-
-        Ok(())
+        Ok(Output { task })
     }
 }
 
 #[async_trait::async_trait]
-impl Precondition for ValidateChainIdsTask {
+impl Task for ValidateChainIdsTask {
+    fn kind(&self) -> TaskKind {
+        TaskKind::Precondition
+    }
+
     fn id(&self) -> TaskId {
         "validate_chain_ids".into()
     }
 
-    async fn check(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
         (*self).run_once(stop_receiver.0).await
     }
 }
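The new `bwip.rs` file introduced below is the first layer in this diff whose `Output` carries three tasks at once; each `#[context(task)]` field is collected by the `IntoContext` derive. A reduced sketch of that shape, with one hypothetical task type reused for all fields:

```rust
use crate::{
    service::StopReceiver,
    task::{Task, TaskId},
    IntoContext,
};

// Hypothetical background job standing in for the three VM-runner tasks below.
#[derive(Debug)]
pub struct Job(pub &'static str);

#[async_trait::async_trait]
impl Task for Job {
    fn id(&self) -> TaskId {
        self.0.into()
    }

    async fn run(self: Box<Self>, _stop_receiver: StopReceiver) -> anyhow::Result<()> {
        Ok(())
    }
}

#[derive(Debug, IntoContext)]
#[context(crate = crate)]
pub struct Output {
    // Fields marked `task` are registered as tasks when the layer's output is
    // merged into the service context; unannotated fields become resources.
    #[context(task)]
    pub loader: Job,
    #[context(task)]
    pub output_handler: Job,
    #[context(task)]
    pub main_job: Job,
}
```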
diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs
new file mode 100644
index 000000000000..74b4b5e32072
--- /dev/null
+++ b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs
@@ -0,0 +1,110 @@
+use zksync_config::configs::vm_runner::BasicWitnessInputProducerConfig;
+use zksync_types::L2ChainId;
+use zksync_vm_runner::{
+    BasicWitnessInputProducer, BasicWitnessInputProducerIo, ConcurrentOutputHandlerFactoryTask,
+    StorageSyncTask,
+};
+
+use crate::{
+    implementations::resources::{
+        object_store::ObjectStoreResource,
+        pools::{MasterPool, PoolResource},
+    },
+    service::StopReceiver,
+    task::{Task, TaskId},
+    wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
+};
+
+#[derive(Debug)]
+pub struct BasicWitnessInputProducerLayer {
+    basic_witness_input_producer_config: BasicWitnessInputProducerConfig,
+    zksync_network_id: L2ChainId,
+}
+
+impl BasicWitnessInputProducerLayer {
+    pub fn new(
+        basic_witness_input_producer_config: BasicWitnessInputProducerConfig,
+        zksync_network_id: L2ChainId,
+    ) -> Self {
+        Self {
+            basic_witness_input_producer_config,
+            zksync_network_id,
+        }
+    }
+}
+
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub master_pool: PoolResource<MasterPool>,
+    pub object_store: ObjectStoreResource,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    #[context(task)]
+    pub output_handler_factory_task:
+        ConcurrentOutputHandlerFactoryTask<BasicWitnessInputProducerIo>,
+    #[context(task)]
+    pub loader_task: StorageSyncTask<BasicWitnessInputProducerIo>,
+    #[context(task)]
+    pub basic_witness_input_producer: BasicWitnessInputProducer,
+}
+
+#[async_trait::async_trait]
+impl WiringLayer for BasicWitnessInputProducerLayer {
+    type Input = Input;
+    type Output = Output;
+
+    fn layer_name(&self) -> &'static str {
+        "vm_runner_bwip"
+    }
+
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let Input {
+            master_pool,
+            object_store,
+        } = input;
+
+        let (basic_witness_input_producer, tasks) = BasicWitnessInputProducer::new(
+            // One for `StorageSyncTask` which can hold a long-term connection in case it needs to
+            // catch up cache.
+            //
+            // One for `ConcurrentOutputHandlerFactoryTask`/`VmRunner` as they need occasional access
+            // to DB for querying last processed batch and last ready to be loaded batch.
+            //
+            // `window_size` connections for `BasicWitnessInputProducer`
+            // as there can be multiple output handlers holding multi-second connections to process
+            // BWIP data.
+            master_pool
+                .get_custom(self.basic_witness_input_producer_config.window_size + 2)
+                .await?,
+            object_store.0,
+            self.basic_witness_input_producer_config.db_path,
+            self.zksync_network_id,
+            self.basic_witness_input_producer_config
+                .first_processed_batch,
+            self.basic_witness_input_producer_config.window_size,
+        )
+        .await?;
+
+        Ok(Output {
+            output_handler_factory_task: tasks.output_handler_factory_task,
+            loader_task: tasks.loader_task,
+            basic_witness_input_producer,
+        })
+    }
+}
+
+#[async_trait::async_trait]
+impl Task for BasicWitnessInputProducer {
+    fn id(&self) -> TaskId {
+        "vm_runner/bwip".into()
+    }
+
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        (*self).run(&stop_receiver.0).await
+    }
+}
diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs
index a105ad81ee60..91e92ffcd1ba 100644
--- a/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs
+++ b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs
@@ -5,6 +5,7 @@ use crate::{
     task::{Task, TaskId},
 };
 
+pub mod bwip;
 pub mod protective_reads;
 
 #[async_trait::async_trait]
@@ -13,10 +14,8 @@ impl<W: VmRunnerIo> Task for StorageSyncTask<W> {
         format!("vm_runner/{}/storage_sync", self.io().name()).into()
     }
 
-    async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
-        StorageSyncTask::run(*self, stop_receiver.0.clone()).await?;
-        stop_receiver.0.changed().await?;
-        Ok(())
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        (*self).run(stop_receiver.0).await
     }
 }
 
@@ -26,9 +25,7 @@ impl<W: VmRunnerIo> Task for ConcurrentOutputHandlerFactoryTask<W> {
         format!("vm_runner/{}/output_handler", self.io().name()).into()
     }
 
-    async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
-        ConcurrentOutputHandlerFactoryTask::run(*self, stop_receiver.0.clone()).await?;
-        stop_receiver.0.changed().await?;
-        Ok(())
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        (*self).run(stop_receiver.0).await
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs b/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs
index a55f8dd7ac85..3b07d0cea139 100644
--- a/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs
+++ b/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs
@@ -1,20 +1,42 @@
 use zksync_config::configs::vm_runner::ProtectiveReadsWriterConfig;
+use zksync_node_framework_derive::FromContext;
 use zksync_types::L2ChainId;
-use zksync_vm_runner::ProtectiveReadsWriter;
+use zksync_vm_runner::{
+    ConcurrentOutputHandlerFactoryTask, ProtectiveReadsIo, ProtectiveReadsWriter, StorageSyncTask,
+};
 
 use crate::{
     implementations::resources::pools::{MasterPool, PoolResource},
-    service::{ServiceContext, StopReceiver},
+    service::StopReceiver,
     task::{Task, TaskId},
     wiring_layer::{WiringError, WiringLayer},
+    IntoContext,
 };
 
+/// Wiring layer for protective reads writer.
#[derive(Debug)] pub struct ProtectiveReadsWriterLayer { protective_reads_writer_config: ProtectiveReadsWriterConfig, zksync_network_id: L2ChainId, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub protective_reads_writer: ProtectiveReadsWriter, + #[context(task)] + pub loader_task: StorageSyncTask, + #[context(task)] + pub output_handler_factory_task: ConcurrentOutputHandlerFactoryTask, +} + impl ProtectiveReadsWriterLayer { pub fn new( protective_reads_writer_config: ProtectiveReadsWriterConfig, @@ -29,12 +51,15 @@ impl ProtectiveReadsWriterLayer { #[async_trait::async_trait] impl WiringLayer for ProtectiveReadsWriterLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "vm_runner_protective_reads" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let master_pool = context.get_resource::>().await?; + async fn wire(self, input: Self::Input) -> Result { + let master_pool = input.master_pool; let (protective_reads_writer, tasks) = ProtectiveReadsWriter::new( // One for `StorageSyncTask` which can hold a long-term connection in case it needs to @@ -56,27 +81,21 @@ impl WiringLayer for ProtectiveReadsWriterLayer { ) .await?; - context.add_task(Box::new(tasks.loader_task)); - context.add_task(Box::new(tasks.output_handler_factory_task)); - context.add_task(Box::new(ProtectiveReadsWriterTask { + Ok(Output { protective_reads_writer, - })); - Ok(()) + loader_task: tasks.loader_task, + output_handler_factory_task: tasks.output_handler_factory_task, + }) } } -#[derive(Debug)] -struct ProtectiveReadsWriterTask { - protective_reads_writer: ProtectiveReadsWriter, -} - #[async_trait::async_trait] -impl Task for ProtectiveReadsWriterTask { +impl Task for ProtectiveReadsWriter { fn id(&self) -> TaskId { "vm_runner/protective_reads_writer".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.protective_reads_writer.run(&stop_receiver.0).await + (*self).run(&stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/caches.rs b/core/node/node_framework/src/implementations/layers/web3_api/caches.rs index c01a62748fa4..b7718a41fab0 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/caches.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/caches.rs @@ -1,23 +1,40 @@ use std::time::Duration; -use zksync_node_api_server::web3::mempool_cache::{self, MempoolCache}; +use zksync_node_api_server::web3::mempool_cache::{MempoolCache, MempoolCacheUpdateTask}; +use zksync_node_framework_derive::FromContext; use crate::{ implementations::resources::{ pools::{PoolResource, ReplicaPool}, web3_api::MempoolCacheResource, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + IntoContext, }; +/// Wiring layer for API mempool cache. 
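+///
+/// Construction sketch; the capacity and interval below are illustrative, not defaults:
+///
+/// ```ignore
+/// use std::time::Duration;
+///
+/// // Cache up to 10_000 mempool entries, refreshed from the replica pool every 50 ms.
+/// let layer = MempoolCacheLayer::new(10_000, Duration::from_millis(50));
+/// ```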
#[derive(Debug)] pub struct MempoolCacheLayer { capacity: usize, update_interval: Duration, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub replica_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub mempool_cache: MempoolCacheResource, + #[context(task)] + pub update_task: MempoolCacheUpdateTask, +} + impl MempoolCacheLayer { pub fn new(capacity: usize, update_interval: Duration) -> Self { Self { @@ -29,24 +46,24 @@ impl MempoolCacheLayer { #[async_trait::async_trait] impl WiringLayer for MempoolCacheLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "mempool_cache_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool_resource = context.get_resource::>().await?; - let replica_pool = pool_resource.get().await?; + async fn wire(self, input: Self::Input) -> Result { + let replica_pool = input.replica_pool.get().await?; let mempool_cache = MempoolCache::new(self.capacity); let update_task = mempool_cache.update_task(replica_pool, self.update_interval); - context.add_task(Box::new(MempoolCacheUpdateTask(update_task))); - context.insert_resource(MempoolCacheResource(mempool_cache))?; - Ok(()) + Ok(Output { + mempool_cache: mempool_cache.into(), + update_task, + }) } } -#[derive(Debug)] -pub struct MempoolCacheUpdateTask(mempool_cache::MempoolCacheUpdateTask); - #[async_trait::async_trait] impl Task for MempoolCacheUpdateTask { fn id(&self) -> TaskId { @@ -54,6 +71,6 @@ impl Task for MempoolCacheUpdateTask { } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.0.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server.rs index da0d9d3cc33a..8b35e13827be 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server.rs @@ -13,9 +13,10 @@ use crate::{ sync_state::SyncStateResource, web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Set of optional variables that can be altered to modify the behavior of API builder. @@ -56,6 +57,7 @@ impl Web3ServerOptionalConfig { api_builder = api_builder .with_websocket_requests_per_minute_limit(websocket_requests_per_minute_limit); } + api_builder = api_builder.with_extended_tracing(self.with_extended_tracing); api_builder } } @@ -67,6 +69,22 @@ enum Transport { Ws, } +/// Wiring layer for Web3 JSON RPC server. +/// +/// ## Requests resources +/// +/// - `PoolResource` +/// - `TxSenderResource` +/// - `SyncStateResource` (optional) +/// - `TreeApiClientResource` (optional) +/// - `MempoolCacheResource` +/// - `CircuitBreakersResource` (adds a circuit breaker) +/// - `AppHealthCheckResource` (adds a health check) +/// +/// ## Adds tasks +/// +/// - `Web3ApiTask` -- wrapper for all the tasks spawned by the API. +/// - `ApiTaskGarbageCollector` -- maintenance task that manages API tasks. 
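+///
+/// Optional resources are modeled as `Option` fields in the `Input` struct below, so a missing
+/// resource becomes `None` rather than a wiring error; the pattern used in `wire()` is:
+///
+/// ```ignore
+/// let sync_state = input.sync_state.map(|state| state.0); // `None` if no layer provided it
+/// ```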
#[derive(Debug)] pub struct Web3ServerLayer { transport: Transport, @@ -75,6 +93,29 @@ pub struct Web3ServerLayer { optional_config: Web3ServerOptionalConfig, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub replica_pool: PoolResource, + pub tx_sender: TxSenderResource, + pub sync_state: Option, + pub tree_api_client: Option, + pub mempool_cache: MempoolCacheResource, + #[context(default)] + pub circuit_breakers: CircuitBreakersResource, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub web3_api_task: Web3ApiTask, + #[context(task)] + pub garbage_collector_task: ApiTaskGarbageCollector, +} + impl Web3ServerLayer { pub fn http( port: u16, @@ -105,6 +146,9 @@ impl Web3ServerLayer { #[async_trait::async_trait] impl WiringLayer for Web3ServerLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { match self.transport { Transport::Http => "web3_http_server_layer", @@ -112,23 +156,15 @@ impl WiringLayer for Web3ServerLayer { } } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { // Get required resources. - let replica_resource_pool = context.get_resource::>().await?; + let replica_resource_pool = input.replica_pool; let updaters_pool = replica_resource_pool.get_custom(2).await?; let replica_pool = replica_resource_pool.get().await?; - let tx_sender = context.get_resource::().await?.0; - let sync_state = match context.get_resource::().await { - Ok(sync_state) => Some(sync_state.0), - Err(WiringError::ResourceLacking { .. }) => None, - Err(err) => return Err(err), - }; - let tree_api_client = match context.get_resource::().await { - Ok(client) => Some(client.0), - Err(WiringError::ResourceLacking { .. }) => None, - Err(err) => return Err(err), - }; - let MempoolCacheResource(mempool_cache) = context.get_resource().await?; + let TxSenderResource(tx_sender) = input.tx_sender; + let MempoolCacheResource(mempool_cache) = input.mempool_cache; + let sync_state = input.sync_state.map(|state| state.0); + let tree_api_client = input.tree_api_client.map(|client| client.0); // Build server. let mut api_builder = @@ -163,16 +199,15 @@ impl WiringLayer for Web3ServerLayer { // Insert healthcheck. let api_health_check = server.health_check(); - let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; - app_health + input + .app_health + .0 .insert_component(api_health_check) .map_err(WiringError::internal)?; // Insert circuit breaker. - let circuit_breaker_resource = context - .get_resource_or_default::() - .await; - circuit_breaker_resource + input + .circuit_breakers .breakers .insert(Box::new(ReplicationLagChecker { pool: replica_pool, @@ -188,10 +223,10 @@ impl WiringLayer for Web3ServerLayer { task_sender, }; let garbage_collector_task = ApiTaskGarbageCollector { task_receiver }; - context.add_task(Box::new(web3_api_task)); - context.add_task(Box::new(garbage_collector_task)); - - Ok(()) + Ok(Output { + web3_api_task, + garbage_collector_task, + }) } } @@ -206,7 +241,7 @@ impl WiringLayer for Web3ServerLayer { // TODO (QIT-26): Once we switch the codebase to only use the framework, we need to properly refactor the API to only // use abstractions provided by this framework and not spawn any tasks on its own. 
#[derive(Debug)] -struct Web3ApiTask { +pub struct Web3ApiTask { transport: Transport, server: ApiServer, task_sender: oneshot::Sender>, @@ -236,7 +271,7 @@ impl Task for Web3ApiTask { /// Helper task that waits for a list of task join handles and then awaits them all. /// For more details, see [`Web3ApiTask`]. #[derive(Debug)] -struct ApiTaskGarbageCollector { +pub struct ApiTaskGarbageCollector { task_receiver: oneshot::Receiver>, } @@ -250,7 +285,10 @@ impl Task for ApiTaskGarbageCollector { // We can ignore the stop signal here, since we're tied to the main API task through the channel: // it'll either get dropped if API cannot be built or will send something through the channel. // The tasks it sends are aware of the stop receiver themselves. - let tasks = self.task_receiver.await?; + let Ok(tasks) = self.task_receiver.await else { + // API cannot be built, so there are no tasks to wait for. + return Ok(()); + }; let _ = futures::future::join_all(tasks).await; Ok(()) } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs b/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs index 42166e16b1dd..07371a65131e 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs @@ -6,19 +6,34 @@ use crate::{ implementations::resources::{ healthcheck::AppHealthCheckResource, web3_api::TreeApiClientResource, }, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; -/// Layer that inserts the `TreeApiHttpClient` into the `ServiceContext` resources, if there is no +/// Wiring layer that provides the `TreeApiHttpClient` into the `ServiceContext` resources, if there is no /// other client already inserted. /// -/// In case a client is already provided in the contest, the layer does nothing. +/// In case a client is already provided in the context, this layer does nothing. #[derive(Debug)] pub struct TreeApiClientLayer { url: Option, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + /// Fetched to check whether the `TreeApiClientResource` was already provided by another layer. + pub tree_api_client: Option, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub tree_api_client: Option, +} + impl TreeApiClientLayer { pub fn http(url: Option) -> Self { Self { url } @@ -27,33 +42,36 @@ impl TreeApiClientLayer { #[async_trait::async_trait] impl WiringLayer for TreeApiClientLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "tree_api_client_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - if let Some(url) = &self.url { - let client = Arc::new(TreeApiHttpClient::new(url)); - match context.insert_resource(TreeApiClientResource(client.clone())) { - Ok(()) => { - // There was no client added before, we added one. - } - Err(WiringError::ResourceAlreadyProvided { .. }) => { - // Some other client was already added. We don't want to replace it. - return Ok(()); - } - err @ Err(_) => { - // Propagate any other error. - return err; - } - } - - // Only provide the health check if necessary. 
- let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; - app_health - .insert_custom_component(client) - .map_err(WiringError::internal)?; + async fn wire(self, input: Self::Input) -> Result { + if input.tree_api_client.is_some() { + tracing::info!("Tree API client is already provided"); + return Ok(Output { + tree_api_client: None, + }); } - Ok(()) + + let Some(url) = &self.url else { + tracing::info!("No Tree API client URL provided, not adding a fallback client"); + return Ok(Output { + tree_api_client: None, + }); + }; + + let client = Arc::new(TreeApiHttpClient::new(url)); + input + .app_health + .0 + .insert_custom_component(client.clone()) + .map_err(WiringError::internal)?; + Ok(Output { + tree_api_client: Some(client.into()), + }) } } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index 010778315e58..4ece9b024300 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -1,11 +1,11 @@ -use std::{fmt, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use tokio::sync::RwLock; use zksync_node_api_server::{ execution_sandbox::{VmConcurrencyBarrier, VmConcurrencyLimiter}, tx_sender::{ApiContracts, TxSenderBuilder, TxSenderConfig}, }; -use zksync_state::PostgresStorageCaches; +use zksync_state::{PostgresStorageCaches, PostgresStorageCachesTask}; use zksync_types::Address; use zksync_web3_decl::{ client::{DynClient, L2}, @@ -21,9 +21,10 @@ use crate::{ state_keeper::ConditionalSealerResource, web3_api::{TxSenderResource, TxSinkResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; #[derive(Debug)] @@ -33,6 +34,25 @@ pub struct PostgresStorageCachesConfig { pub latest_values_cache_size: u64, } +/// Wiring layer for the `TxSender`. +/// Prepares the `TxSender` itself, as well as the tasks required for its maintenance. 
+/// +/// ## Requests resources +/// +/// - `TxSinkResource` +/// - `PoolResource` +/// - `ConditionalSealerResource` (optional) +/// - `FeeInputResource` +/// +/// ## Adds resources +/// +/// - `TxSenderResource` +/// +/// ## Adds tasks +/// +/// - `PostgresStorageCachesTask` +/// - `VmConcurrencyBarrierTask` +/// - `WhitelistedTokensForAaUpdateTask` (optional) #[derive(Debug)] pub struct TxSenderLayer { tx_sender_config: TxSenderConfig, @@ -42,6 +62,28 @@ pub struct TxSenderLayer { whitelisted_tokens_for_aa_cache: bool, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub tx_sink: TxSinkResource, + pub replica_pool: PoolResource, + pub fee_input: FeeInputResource, + pub main_node_client: Option, + pub sealer: Option, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub tx_sender: TxSenderResource, + #[context(task)] + pub vm_concurrency_barrier: VmConcurrencyBarrier, + #[context(task)] + pub postgres_storage_caches_task: Option, + #[context(task)] + pub whitelisted_tokens_for_aa_update_task: Option, +} + impl TxSenderLayer { pub fn new( tx_sender_config: TxSenderConfig, @@ -70,21 +112,19 @@ impl TxSenderLayer { #[async_trait::async_trait] impl WiringLayer for TxSenderLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "tx_sender_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { // Get required resources. - let tx_sink = context.get_resource::().await?.0; - let pool_resource = context.get_resource::>().await?; - let replica_pool = pool_resource.get().await?; - let sealer = match context.get_resource::().await { - Ok(sealer) => Some(sealer.0), - Err(WiringError::ResourceLacking { .. }) => None, - Err(other) => return Err(other), - }; - let fee_input = context.get_resource::().await?.0; + let tx_sink = input.tx_sink.0; + let replica_pool = input.replica_pool.get().await?; + let sealer = input.sealer.map(|s| s.0); + let fee_input = input.fee_input.0; // Initialize Postgres caches. let factory_deps_capacity = self.postgres_storage_caches_config.factory_deps_cache_size; @@ -95,20 +135,18 @@ impl WiringLayer for TxSenderLayer { let mut storage_caches = PostgresStorageCaches::new(factory_deps_capacity, initial_writes_capacity); - if values_capacity > 0 { - let values_cache_task = storage_caches - .configure_storage_values_cache(values_capacity, replica_pool.clone()); - context.add_task(Box::new(PostgresStorageCachesTask { - task: values_cache_task, - })); - } + let postgres_storage_caches_task = if values_capacity > 0 { + Some( + storage_caches + .configure_storage_values_cache(values_capacity, replica_pool.clone()), + ) + } else { + None + }; // Initialize `VmConcurrencyLimiter`. let (vm_concurrency_limiter, vm_concurrency_barrier) = VmConcurrencyLimiter::new(self.max_vm_concurrency); - context.add_task(Box::new(VmConcurrencyBarrierTask { - barrier: vm_concurrency_barrier, - })); // Build `TxSender`. let mut tx_sender = TxSenderBuilder::new(self.tx_sender_config, replica_pool, tx_sink); @@ -117,15 +155,23 @@ impl WiringLayer for TxSenderLayer { } // Add the task for updating the whitelisted tokens for the AA cache. 
- if self.whitelisted_tokens_for_aa_cache { - let MainNodeClientResource(main_node_client) = context.get_resource().await?; + let whitelisted_tokens_for_aa_update_task = if self.whitelisted_tokens_for_aa_cache { + let MainNodeClientResource(main_node_client) = + input.main_node_client.ok_or_else(|| { + WiringError::Configuration( + "Main node client is required for the whitelisted tokens for AA cache" + .into(), + ) + })?; let whitelisted_tokens = Arc::new(RwLock::new(Default::default())); - context.add_task(Box::new(WhitelistedTokensForAaUpdateTask { + tx_sender = tx_sender.with_whitelisted_tokens_for_aa(whitelisted_tokens.clone()); + Some(WhitelistedTokensForAaUpdateTask { whitelisted_tokens: whitelisted_tokens.clone(), main_node_client, - })); - tx_sender = tx_sender.with_whitelisted_tokens_for_aa(whitelisted_tokens); - } + }) + } else { + None + }; let tx_sender = tx_sender.build( fee_input, @@ -133,20 +179,13 @@ impl WiringLayer for TxSenderLayer { self.api_contracts, storage_caches, ); - context.insert_resource(TxSenderResource(tx_sender))?; - Ok(()) - } -} - -struct PostgresStorageCachesTask { - task: zksync_state::PostgresStorageCachesTask, -} - -impl fmt::Debug for PostgresStorageCachesTask { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PostgresStorageCachesTask") - .finish_non_exhaustive() + Ok(Output { + tx_sender: tx_sender.into(), + postgres_storage_caches_task, + vm_concurrency_barrier, + whitelisted_tokens_for_aa_update_task, + }) } } @@ -157,16 +196,12 @@ impl Task for PostgresStorageCachesTask { } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.task.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } -struct VmConcurrencyBarrierTask { - barrier: VmConcurrencyBarrier, -} - #[async_trait::async_trait] -impl Task for VmConcurrencyBarrierTask { +impl Task for VmConcurrencyBarrier { fn id(&self) -> TaskId { "vm_concurrency_barrier_task".into() } @@ -175,18 +210,18 @@ impl Task for VmConcurrencyBarrierTask { // Wait for the stop signal. stop_receiver.0.changed().await?; // Stop signal was received: seal the barrier so that no new VM requests are accepted. - self.barrier.close(); + self.close(); // Wait until all the existing API requests are processed. // We don't have to synchronize this with API servers being stopped, as they can decide themselves how to handle // ongoing requests during the shutdown. // We don't have to implement a timeout here either, as it'll be handled by the framework itself. 
- self.barrier.wait_until_stopped().await; + self.wait_until_stopped().await; Ok(()) } } #[derive(Debug)] -struct WhitelistedTokensForAaUpdateTask { +pub struct WhitelistedTokensForAaUpdateTask { whitelisted_tokens: Arc>>, main_node_client: Box>, } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink.rs deleted file mode 100644 index 98ed50ba9e45..000000000000 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink.rs +++ /dev/null @@ -1,71 +0,0 @@ -use std::sync::Arc; - -use zksync_node_api_server::tx_sender::{ - master_pool_sink::MasterPoolSink, - proxy::{AccountNonceSweeperTask, TxProxy}, -}; - -use crate::{ - implementations::resources::{ - main_node_client::MainNodeClientResource, - pools::{MasterPool, PoolResource}, - web3_api::TxSinkResource, - }, - service::{ServiceContext, StopReceiver}, - task::{Task, TaskId}, - wiring_layer::{WiringError, WiringLayer}, -}; - -#[derive(Debug)] -#[non_exhaustive] -pub enum TxSinkLayer { - MasterPoolSink, - ProxySink, -} - -#[async_trait::async_trait] -impl WiringLayer for TxSinkLayer { - fn layer_name(&self) -> &'static str { - "tx_sink_layer" - } - - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let tx_sink = match self.as_ref() { - TxSinkLayer::MasterPoolSink => { - let pool = context - .get_resource::>() - .await? - .get() - .await?; - TxSinkResource(Arc::new(MasterPoolSink::new(pool))) - } - TxSinkLayer::ProxySink => { - let MainNodeClientResource(client) = context.get_resource().await?; - let proxy = TxProxy::new(client); - - let pool = context - .get_resource::>() - .await? - .get_singleton() - .await?; - let task = proxy.account_nonce_sweeper_task(pool); - context.add_task(Box::new(task)); - - TxSinkResource(Arc::new(proxy)) - } - }; - context.insert_resource(tx_sink)?; - Ok(()) - } -} - -#[async_trait::async_trait] -impl Task for AccountNonceSweeperTask { - fn id(&self) -> TaskId { - "account_nonce_sweeper_task".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/master_pool_sink.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/master_pool_sink.rs new file mode 100644 index 000000000000..79951a95ab1b --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/master_pool_sink.rs @@ -0,0 +1,42 @@ +use zksync_node_api_server::tx_sender::master_pool_sink::MasterPoolSink; + +use crate::{ + implementations::resources::{ + pools::{MasterPool, PoolResource}, + web3_api::TxSinkResource, + }, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// Wiring layer for [`MasterPoolSink`], [`TxSink`](zksync_node_api_server::tx_sender::tx_sink::TxSink) implementation. 
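+///
+/// Usage sketch; the node builder API is assumed for illustration only:
+///
+/// ```ignore
+/// // The layer is a unit struct; it only needs a `PoolResource<MasterPool>` in the context.
+/// node_builder.add_layer(MasterPoolSinkLayer);
+/// ```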
+pub struct MasterPoolSinkLayer;
+
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub master_pool: PoolResource<MasterPool>,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    pub tx_sink: TxSinkResource,
+}
+
+#[async_trait::async_trait]
+impl WiringLayer for MasterPoolSinkLayer {
+    type Input = Input;
+    type Output = Output;
+
+    fn layer_name(&self) -> &'static str {
+        "master_pool_sink_layer"
+    }
+
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let pool = input.master_pool.get().await?;
+        Ok(Output {
+            tx_sink: MasterPoolSink::new(pool).into(),
+        })
+    }
+}
diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/mod.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/mod.rs
new file mode 100644
index 000000000000..61b9fb1d9e9e
--- /dev/null
+++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/mod.rs
@@ -0,0 +1,4 @@
+pub use self::{master_pool_sink::MasterPoolSinkLayer, proxy_sink::ProxySinkLayer};
+
+pub mod master_pool_sink;
+pub mod proxy_sink;
diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/proxy_sink.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/proxy_sink.rs
new file mode 100644
index 000000000000..4340dbdb3f43
--- /dev/null
+++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/proxy_sink.rs
@@ -0,0 +1,66 @@
+use zksync_node_api_server::tx_sender::proxy::{AccountNonceSweeperTask, TxProxy};
+
+use crate::{
+    implementations::resources::{
+        main_node_client::MainNodeClientResource,
+        pools::{MasterPool, PoolResource},
+        web3_api::TxSinkResource,
+    },
+    service::StopReceiver,
+    task::{Task, TaskId},
+    wiring_layer::{WiringError, WiringLayer},
+    FromContext, IntoContext,
+};
+
+/// Wiring layer for [`TxProxy`], [`TxSink`](zksync_node_api_server::tx_sender::tx_sink::TxSink) implementation.
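+///
+/// Unlike `MasterPoolSinkLayer`, this layer also emits an `AccountNonceSweeperTask`. A hedged
+/// usage sketch (the builder API is assumed for illustration):
+///
+/// ```ignore
+/// // Requires `MainNodeClientResource` and a master pool in the wiring context.
+/// node_builder.add_layer(ProxySinkLayer);
+/// ```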
+#[derive(Debug)]
+pub struct ProxySinkLayer;
+
+#[derive(Debug, FromContext)]
+#[context(crate = crate)]
+pub struct Input {
+    pub main_node_client: MainNodeClientResource,
+    pub master_pool: PoolResource<MasterPool>,
+}
+
+#[derive(Debug, IntoContext)]
+#[context(crate = crate)]
+pub struct Output {
+    pub tx_sink: TxSinkResource,
+    #[context(task)]
+    pub account_nonce_sweeper_task: AccountNonceSweeperTask,
+}
+
+#[async_trait::async_trait]
+impl WiringLayer for ProxySinkLayer {
+    type Input = Input;
+    type Output = Output;
+
+    fn layer_name(&self) -> &'static str {
+        "proxy_sink_layer"
+    }
+
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
+        let MainNodeClientResource(client) = input.main_node_client;
+        let proxy = TxProxy::new(client);
+
+        let pool = input.master_pool.get_singleton().await?;
+        let task = proxy.account_nonce_sweeper_task(pool);
+
+        Ok(Output {
+            tx_sink: proxy.into(),
+            account_nonce_sweeper_task: task,
+        })
+    }
+}
+
+#[async_trait::async_trait]
+impl Task for AccountNonceSweeperTask {
+    fn id(&self) -> TaskId {
+        "account_nonce_sweeper_task".into()
+    }
+
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        (*self).run(stop_receiver.0).await
+    }
+}
diff --git a/core/node/node_framework/src/implementations/resources/action_queue.rs b/core/node/node_framework/src/implementations/resources/action_queue.rs
index 164f8dca3109..7edb8bad3111 100644
--- a/core/node/node_framework/src/implementations/resources/action_queue.rs
+++ b/core/node/node_framework/src/implementations/resources/action_queue.rs
@@ -2,6 +2,8 @@ use zksync_node_sync::ActionQueueSender;
 
 use crate::resource::{Resource, Unique};
 
+/// A resource that provides [`ActionQueueSender`] to the service.
+/// This resource is unique, i.e. it's expected to be consumed by a single service.
 #[derive(Debug, Clone)]
 pub struct ActionQueueSenderResource(pub Unique<ActionQueueSender>);
 
@@ -10,3 +12,9 @@ impl Resource for ActionQueueSenderResource {
         "external_node/action_queue_sender".into()
     }
 }
+
+impl From<ActionQueueSender> for ActionQueueSenderResource {
+    fn from(sender: ActionQueueSender) -> Self {
+        Self(Unique::new(sender))
+    }
+}
diff --git a/core/node/node_framework/src/implementations/resources/base_token_ratio_provider.rs b/core/node/node_framework/src/implementations/resources/base_token_ratio_provider.rs
new file mode 100644
index 000000000000..6699d5dfc70b
--- /dev/null
+++ b/core/node/node_framework/src/implementations/resources/base_token_ratio_provider.rs
@@ -0,0 +1,27 @@
+use std::sync::Arc;
+
+use zksync_base_token_adjuster::{BaseTokenRatioProvider, NoOpRatioProvider};
+
+use crate::resource::Resource;
+
+/// A resource that provides [`BaseTokenRatioProvider`] implementation to the service.
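+///
+/// A sketch of the two construction paths (the `my_provider` value is hypothetical):
+///
+/// ```ignore
+/// let explicit: BaseTokenRatioProviderResource = Arc::new(my_provider).into();
+/// let fallback = BaseTokenRatioProviderResource::default(); // wraps `NoOpRatioProvider`
+/// ```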
+#[derive(Debug, Clone)] +pub struct BaseTokenRatioProviderResource(pub Arc); + +impl Default for BaseTokenRatioProviderResource { + fn default() -> Self { + Self(Arc::new(NoOpRatioProvider::default())) + } +} + +impl Resource for BaseTokenRatioProviderResource { + fn name() -> String { + "common/base_token_ratio_provider".into() + } +} + +impl From> for BaseTokenRatioProviderResource { + fn from(provider: Arc) -> Self { + Self(provider) + } +} diff --git a/core/node/node_framework/src/implementations/resources/circuit_breakers.rs b/core/node/node_framework/src/implementations/resources/circuit_breakers.rs index 6b9eebb7b96e..038d03a31eb0 100644 --- a/core/node/node_framework/src/implementations/resources/circuit_breakers.rs +++ b/core/node/node_framework/src/implementations/resources/circuit_breakers.rs @@ -4,6 +4,7 @@ use zksync_circuit_breaker::CircuitBreakers; use crate::resource::Resource; +/// A resource that provides [`CircuitBreakers`] to the service. #[derive(Debug, Clone, Default)] pub struct CircuitBreakersResource { pub breakers: Arc, diff --git a/core/node/node_framework/src/implementations/resources/da_client.rs b/core/node/node_framework/src/implementations/resources/da_client.rs new file mode 100644 index 000000000000..51aba6d19d4e --- /dev/null +++ b/core/node/node_framework/src/implementations/resources/da_client.rs @@ -0,0 +1,13 @@ +use zksync_da_client::DataAvailabilityClient; + +use crate::resource::Resource; + +/// Represents a client of a certain DA solution. +#[derive(Debug, Clone)] +pub struct DAClientResource(pub Box); + +impl Resource for DAClientResource { + fn name() -> String { + "common/da_client".into() + } +} diff --git a/core/node/node_framework/src/implementations/resources/eth_interface.rs b/core/node/node_framework/src/implementations/resources/eth_interface.rs index 7a72abd11a9a..cf470c0379da 100644 --- a/core/node/node_framework/src/implementations/resources/eth_interface.rs +++ b/core/node/node_framework/src/implementations/resources/eth_interface.rs @@ -3,6 +3,7 @@ use zksync_web3_decl::client::{DynClient, L1}; use crate::resource::Resource; +/// A resource that provides L1 interface object to the service. #[derive(Debug, Clone)] pub struct EthInterfaceResource(pub Box>); @@ -12,6 +13,7 @@ impl Resource for EthInterfaceResource { } } +/// A resource that provides L1 interface with signing capabilities to the service. #[derive(Debug, Clone)] pub struct BoundEthInterfaceResource(pub Box); @@ -21,6 +23,7 @@ impl Resource for BoundEthInterfaceResource { } } +/// Same as `BoundEthInterfaceResource`, but for managing EIP-4844 blobs. #[derive(Debug, Clone)] pub struct BoundEthInterfaceForBlobsResource(pub Box); diff --git a/core/node/node_framework/src/implementations/resources/fee_input.rs b/core/node/node_framework/src/implementations/resources/fee_input.rs index fbbf6be3db8c..10271977bac7 100644 --- a/core/node/node_framework/src/implementations/resources/fee_input.rs +++ b/core/node/node_framework/src/implementations/resources/fee_input.rs @@ -4,7 +4,7 @@ use zksync_node_fee_model::BatchFeeModelInputProvider; use crate::resource::Resource; -/// Wrapper for the batch fee model input provider. +/// A resource that provides [`BatchFeeModelInputProvider`] implementation to the service. 
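+///
+/// Consumers typically unwrap the inner provider in `wire()`; e.g. `TxSenderLayer` later in
+/// this diff does essentially:
+///
+/// ```ignore
+/// let fee_input = input.fee_input.0; // shared `BatchFeeModelInputProvider`
+/// ```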
#[derive(Debug, Clone)] pub struct FeeInputResource(pub Arc); @@ -13,3 +13,9 @@ impl Resource for FeeInputResource { "common/fee_input".into() } } + +impl From> for FeeInputResource { + fn from(provider: Arc) -> Self { + Self(provider) + } +} diff --git a/core/node/node_framework/src/implementations/resources/healthcheck.rs b/core/node/node_framework/src/implementations/resources/healthcheck.rs index e1df3ada8f30..b4810eba46be 100644 --- a/core/node/node_framework/src/implementations/resources/healthcheck.rs +++ b/core/node/node_framework/src/implementations/resources/healthcheck.rs @@ -6,6 +6,7 @@ pub use zksync_health_check::{CheckHealth, ReactiveHealthCheck}; use crate::resource::Resource; +/// A resource that provides [`AppHealthCheck`] to the service. #[derive(Debug, Clone, Default)] pub struct AppHealthCheckResource(pub Arc); diff --git a/core/node/node_framework/src/implementations/resources/l1_tx_params.rs b/core/node/node_framework/src/implementations/resources/l1_tx_params.rs index 57ba1f4af165..676828c39885 100644 --- a/core/node/node_framework/src/implementations/resources/l1_tx_params.rs +++ b/core/node/node_framework/src/implementations/resources/l1_tx_params.rs @@ -4,7 +4,7 @@ use zksync_node_fee_model::l1_gas_price::L1TxParamsProvider; use crate::resource::Resource; -/// Wrapper for the l1 tx params provider. +/// A resource that provides [`L1TxParamsProvider`] implementation to the service. #[derive(Debug, Clone)] pub struct L1TxParamsResource(pub Arc); @@ -13,3 +13,9 @@ impl Resource for L1TxParamsResource { "common/l1_tx_params".into() } } + +impl From> for L1TxParamsResource { + fn from(provider: Arc) -> Self { + Self(provider) + } +} diff --git a/core/node/node_framework/src/implementations/resources/main_node_client.rs b/core/node/node_framework/src/implementations/resources/main_node_client.rs index 903a6ce9b9be..491d39726ea9 100644 --- a/core/node/node_framework/src/implementations/resources/main_node_client.rs +++ b/core/node/node_framework/src/implementations/resources/main_node_client.rs @@ -2,6 +2,7 @@ use zksync_web3_decl::client::{DynClient, L2}; use crate::resource::Resource; +/// A resource that provides L2 interface object to the service. 
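+///
+/// The blanket `From` impl below accepts anything convertible into `Box<DynClient<L2>>`
+/// (the `client` value here is hypothetical):
+///
+/// ```ignore
+/// let resource: MainNodeClientResource = client.into();
+/// ```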
#[derive(Debug, Clone)] pub struct MainNodeClientResource(pub Box>); @@ -10,3 +11,9 @@ impl Resource for MainNodeClientResource { "external_node/main_node_client".into() } } + +impl>>> From for MainNodeClientResource { + fn from(client: T) -> Self { + Self(client.into()) + } +} diff --git a/core/node/node_framework/src/implementations/resources/mod.rs b/core/node/node_framework/src/implementations/resources/mod.rs index edfb280d4db7..4f82f4c3a911 100644 --- a/core/node/node_framework/src/implementations/resources/mod.rs +++ b/core/node/node_framework/src/implementations/resources/mod.rs @@ -1,5 +1,7 @@ pub mod action_queue; +pub mod base_token_ratio_provider; pub mod circuit_breakers; +pub mod da_client; pub mod eth_interface; pub mod fee_input; pub mod healthcheck; @@ -7,6 +9,7 @@ pub mod l1_tx_params; pub mod main_node_client; pub mod object_store; pub mod pools; +pub mod price_api_client; pub mod reverter; pub mod state_keeper; pub mod sync_state; diff --git a/core/node/node_framework/src/implementations/resources/object_store.rs b/core/node/node_framework/src/implementations/resources/object_store.rs index d53c7540c793..fbfc20d93180 100644 --- a/core/node/node_framework/src/implementations/resources/object_store.rs +++ b/core/node/node_framework/src/implementations/resources/object_store.rs @@ -4,7 +4,7 @@ use zksync_object_store::ObjectStore; use crate::resource::Resource; -/// Wrapper for the object store. +/// A resource that provides [`ObjectStore`] to the service. #[derive(Debug, Clone)] pub struct ObjectStoreResource(pub Arc); diff --git a/core/node/node_framework/src/implementations/resources/pools.rs b/core/node/node_framework/src/implementations/resources/pools.rs index b33933f83e21..8355bb1bdd62 100644 --- a/core/node/node_framework/src/implementations/resources/pools.rs +++ b/core/node/node_framework/src/implementations/resources/pools.rs @@ -6,10 +6,10 @@ use std::{ time::Duration, }; -use prover_dal::Prover; use tokio::sync::Mutex; use zksync_dal::{ConnectionPool, Core}; use zksync_db_connection::connection_pool::ConnectionPoolBuilder; +use zksync_prover_dal::Prover; use zksync_types::url::SensitiveUrl; use crate::resource::Resource; diff --git a/core/node/node_framework/src/implementations/resources/price_api_client.rs b/core/node/node_framework/src/implementations/resources/price_api_client.rs new file mode 100644 index 000000000000..6543120a26c1 --- /dev/null +++ b/core/node/node_framework/src/implementations/resources/price_api_client.rs @@ -0,0 +1,27 @@ +use std::sync::Arc; + +use zksync_external_price_api::{NoOpPriceAPIClient, PriceAPIClient}; + +use crate::resource::Resource; + +/// A resource that provides [`PriceAPIClient`] implementation to the service. 
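+///
+/// Like other provider resources, it has a no-op default, so dependent layers can request it
+/// with `#[context(default)]`; a minimal sketch:
+///
+/// ```ignore
+/// let resource = PriceAPIClientResource::default(); // wraps `NoOpPriceAPIClient`
+/// ```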
+#[derive(Debug, Clone)]
+pub struct PriceAPIClientResource(pub Arc<dyn PriceAPIClient>);
+
+impl Default for PriceAPIClientResource {
+    fn default() -> Self {
+        Self(Arc::new(NoOpPriceAPIClient))
+    }
+}
+
+impl Resource for PriceAPIClientResource {
+    fn name() -> String {
+        "common/price_api_client".into()
+    }
+}
+
+impl From<Arc<dyn PriceAPIClient>> for PriceAPIClientResource {
+    fn from(provider: Arc<dyn PriceAPIClient>) -> Self {
+        Self(provider)
+    }
+}
diff --git a/core/node/node_framework/src/implementations/resources/reverter.rs b/core/node/node_framework/src/implementations/resources/reverter.rs
index 2a2bdb142a85..9186c727800f 100644
--- a/core/node/node_framework/src/implementations/resources/reverter.rs
+++ b/core/node/node_framework/src/implementations/resources/reverter.rs
@@ -1,15 +1,19 @@
-use std::sync::Arc;
-
 use zksync_block_reverter::BlockReverter;
 
-use crate::resource::Resource;
+use crate::resource::{Resource, Unique};
 
-/// Wrapper for the block reverter.
+/// A resource that provides [`BlockReverter`] to the service.
 #[derive(Debug, Clone)]
-pub struct BlockReverterResource(pub Arc<BlockReverter>);
+pub struct BlockReverterResource(pub Unique<BlockReverter>);
 
 impl Resource for BlockReverterResource {
     fn name() -> String {
         "common/block_reverter".into()
     }
 }
+
+impl From<BlockReverter> for BlockReverterResource {
+    fn from(reverter: BlockReverter) -> Self {
+        Self(Unique::new(reverter))
+    }
+}
diff --git a/core/node/node_framework/src/implementations/resources/state_keeper.rs b/core/node/node_framework/src/implementations/resources/state_keeper.rs
index 804822154497..5db570d7989b 100644
--- a/core/node/node_framework/src/implementations/resources/state_keeper.rs
+++ b/core/node/node_framework/src/implementations/resources/state_keeper.rs
@@ -6,6 +6,8 @@ use zksync_state_keeper::{
 
 use crate::resource::{Resource, Unique};
 
+/// A resource that provides [`StateKeeperIO`] implementation to the service.
+/// This resource is unique, i.e. it's expected to be consumed by a single service.
 #[derive(Debug, Clone)]
 pub struct StateKeeperIOResource(pub Unique<Box<dyn StateKeeperIO>>);
 
@@ -15,6 +17,14 @@ impl Resource for StateKeeperIOResource {
     }
 }
 
+impl<T: StateKeeperIO> From<T> for StateKeeperIOResource {
+    fn from(io: T) -> Self {
+        Self(Unique::new(Box::new(io)))
+    }
+}
+
+/// A resource that provides [`BatchExecutor`] implementation to the service.
+/// This resource is unique, i.e. it's expected to be consumed by a single service.
 #[derive(Debug, Clone)]
 pub struct BatchExecutorResource(pub Unique<Box<dyn BatchExecutor>>);
 
@@ -24,6 +34,14 @@ impl Resource for BatchExecutorResource {
     }
 }
 
+impl<T: BatchExecutor> From<T> for BatchExecutorResource {
+    fn from(executor: T) -> Self {
+        Self(Unique::new(Box::new(executor)))
+    }
+}
+
+/// A resource that provides [`OutputHandler`] implementation to the service.
+/// This resource is unique, i.e. it's expected to be consumed by a single service.
 #[derive(Debug, Clone)]
 pub struct OutputHandlerResource(pub Unique<OutputHandler>);
 
@@ -33,6 +51,13 @@ impl Resource for OutputHandlerResource {
     }
 }
 
+impl From<OutputHandler> for OutputHandlerResource {
+    fn from(handler: OutputHandler) -> Self {
+        Self(Unique::new(handler))
+    }
+}
+
+/// A resource that provides [`ConditionalSealer`] implementation to the service.
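+///
+/// The `From` impl below takes ownership of any concrete sealer and wraps it in an `Arc`
+/// (`MySealer` is hypothetical):
+///
+/// ```ignore
+/// let resource: ConditionalSealerResource = MySealer::new().into();
+/// ```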
#[derive(Debug, Clone)] pub struct ConditionalSealerResource(pub Arc); @@ -41,3 +66,12 @@ impl Resource for ConditionalSealerResource { "state_keeper/conditional_sealer".into() } } + +impl From for ConditionalSealerResource +where + T: ConditionalSealer + 'static, +{ + fn from(sealer: T) -> Self { + Self(Arc::new(sealer)) + } +} diff --git a/core/node/node_framework/src/implementations/resources/sync_state.rs b/core/node/node_framework/src/implementations/resources/sync_state.rs index 87eb565ac077..d2854d187672 100644 --- a/core/node/node_framework/src/implementations/resources/sync_state.rs +++ b/core/node/node_framework/src/implementations/resources/sync_state.rs @@ -2,11 +2,18 @@ use zksync_node_sync::SyncState; use crate::resource::Resource; +/// A resource that provides [`SyncState`] to the service. #[derive(Debug, Clone)] pub struct SyncStateResource(pub SyncState); impl Resource for SyncStateResource { fn name() -> String { - "sync_state".into() + "common/sync_state".into() + } +} + +impl From for SyncStateResource { + fn from(sync_state: SyncState) -> Self { + Self(sync_state) } } diff --git a/core/node/node_framework/src/implementations/resources/web3_api.rs b/core/node/node_framework/src/implementations/resources/web3_api.rs index ba555ccca0ef..78340884a1b4 100644 --- a/core/node/node_framework/src/implementations/resources/web3_api.rs +++ b/core/node/node_framework/src/implementations/resources/web3_api.rs @@ -8,6 +8,7 @@ use zksync_node_api_server::{ use crate::resource::Resource; +/// A resource that provides [`TxSender`] to the service. #[derive(Debug, Clone)] pub struct TxSenderResource(pub TxSender); @@ -17,6 +18,13 @@ impl Resource for TxSenderResource { } } +impl From for TxSenderResource { + fn from(sender: TxSender) -> Self { + Self(sender) + } +} + +/// A resource that provides [`TxSink`] implementation to the service. #[derive(Debug, Clone)] pub struct TxSinkResource(pub Arc); @@ -26,6 +34,13 @@ impl Resource for TxSinkResource { } } +impl From for TxSinkResource { + fn from(sink: T) -> Self { + Self(Arc::new(sink)) + } +} + +/// A resource that provides [`TreeApiClient`] implementation to the service. #[derive(Debug, Clone)] pub struct TreeApiClientResource(pub Arc); @@ -35,6 +50,13 @@ impl Resource for TreeApiClientResource { } } +impl From> for TreeApiClientResource { + fn from(client: Arc) -> Self { + Self(client) + } +} + +/// A resource that provides [`MempoolCache`] to the service. #[derive(Debug, Clone)] pub struct MempoolCacheResource(pub MempoolCache); @@ -43,3 +65,9 @@ impl Resource for MempoolCacheResource { "api/mempool_cache".into() } } + +impl From for MempoolCacheResource { + fn from(cache: MempoolCache) -> Self { + Self(cache) + } +} diff --git a/core/node/node_framework/src/lib.rs b/core/node/node_framework/src/lib.rs index 4f688ab56adb..633086103fb4 100644 --- a/core/node/node_framework/src/lib.rs +++ b/core/node/node_framework/src/lib.rs @@ -1,26 +1,29 @@ //! # ZK Stack node initialization framework. //! -//! ## Introduction -//! //! This crate provides core abstractions that allow one to compose a ZK Stack node. //! Main concepts used in this crate are: //! - [`WiringLayer`](wiring_layer::WiringLayer) - builder interface for tasks. //! - [`Task`](task::Task) - a unit of work that can be executed by the node. //! - [`Resource`](resource::Resource) - a piece of logic that can be shared between tasks. Most resources are //! represented by generic interfaces and also serve as points of customization for tasks. -//! 
- [`ResourceProvider`](resource::ResourceProvider) - a trait that allows one to provide resources to the node. //! - [`ZkStackService`](service::ZkStackService) - a container for tasks and resources that takes care of initialization, running //! and shutting down. -//! -//! The general flow to compose a node is as follows: -//! - Create a [`ResourceProvider`](resource::ResourceProvider) that can provide all the resources that the node needs. -//! - Create a [`ZkStackService`](node::ZkStackService) with that [`ResourceProvider`](resource::ResourceProvider). -//! - Add tasks to the node. -//! - Run it. +//! - [`ZkStackServiceBuilder`](service::ZkStackServiceBuilder) - a builder for the service. pub mod implementations; -pub mod precondition; pub mod resource; pub mod service; pub mod task; pub mod wiring_layer; + +/// Derive macro for the `FromContext` trait. +pub use zksync_node_framework_derive::FromContext; +/// Derive macro for the `IntoContext` trait. +pub use zksync_node_framework_derive::IntoContext; + +pub use self::{ + resource::Resource, + service::{FromContext, IntoContext, StopReceiver}, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, +}; diff --git a/core/node/node_framework/src/precondition.rs b/core/node/node_framework/src/precondition.rs deleted file mode 100644 index a612c5b90a8b..000000000000 --- a/core/node/node_framework/src/precondition.rs +++ /dev/null @@ -1,33 +0,0 @@ -use std::sync::Arc; - -use tokio::sync::Barrier; - -use crate::{service::StopReceiver, task::TaskId}; - -#[async_trait::async_trait] -pub trait Precondition: 'static + Send + Sync { - /// Unique name of the precondition. - fn id(&self) -> TaskId; - - async fn check(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()>; -} - -impl dyn Precondition { - /// An internal helper method that runs a precondition check and lifts the barrier as soon - /// as the check is finished. - pub(super) async fn check_with_barrier( - self: Box, - mut stop_receiver: StopReceiver, - preconditions_barrier: Arc, - ) -> anyhow::Result<()> { - self.check(stop_receiver.clone()).await?; - tokio::select! { - _ = preconditions_barrier.wait() => { - Ok(()) - } - _ = stop_receiver.0.changed() => { - Ok(()) - } - } - } -} diff --git a/core/node/node_framework/src/resource/lazy_resource.rs b/core/node/node_framework/src/resource/lazy_resource.rs deleted file mode 100644 index 3f70187627b8..000000000000 --- a/core/node/node_framework/src/resource/lazy_resource.rs +++ /dev/null @@ -1,194 +0,0 @@ -use std::sync::Arc; - -use thiserror::Error; -use tokio::sync::watch; - -use super::Resource; -use crate::service::StopReceiver; - -/// A lazy resource represents a resource that isn't available at the time when the tasks start. -/// -/// Normally it's used to represent the resources that should be provided by one task to another one. -/// Lazy resources are aware of the node lifecycle, so attempt to resolve the resource won't hang -/// if the resource is never provided: the resolve future will fail once the stop signal is sent by the node. -#[derive(Debug)] -pub struct LazyResource { - resolve_sender: Arc>>, - stop_receiver: StopReceiver, -} - -impl Resource for LazyResource { - fn name() -> String { - format!("lazy {}", T::name()) - } -} - -impl Clone for LazyResource { - fn clone(&self) -> Self { - Self { - resolve_sender: self.resolve_sender.clone(), - stop_receiver: self.stop_receiver.clone(), - } - } -} - -impl LazyResource { - /// Creates a new lazy resource. 
- /// Provided stop receiver will be used to prevent resolving from hanging if the resource is never provided. - pub fn new(stop_receiver: StopReceiver) -> Self { - let (resolve_sender, _resolve_receiver) = watch::channel(None); - - Self { - resolve_sender: Arc::new(resolve_sender), - stop_receiver, - } - } - - /// Returns a future that resolves to the resource once it is provided. - /// If the resource is never provided, the method will return an error once the node is shutting down. - pub async fn resolve(mut self) -> Result { - let mut resolve_receiver = self.resolve_sender.subscribe(); - if let Some(resource) = resolve_receiver.borrow().as_ref() { - return Ok(resource.clone()); - } - - let result = tokio::select! { - _ = self.stop_receiver.0.changed() => { - Err(LazyResourceError::NodeShutdown) - } - _ = resolve_receiver.changed() => { - // ^ we can ignore the error on `changed`, since we hold a strong reference to the sender. - let resource = resolve_receiver.borrow().as_ref().expect("Can only change if provided").clone(); - Ok(resource) - } - }; - - if result.is_ok() { - tracing::info!("Lazy resource {} has been resolved", T::name()); - } - - result - } - - /// Provides the resource. - /// May be called at most once. Subsequent calls will return an error. - pub async fn provide(&mut self, resource: T) -> Result<(), LazyResourceError> { - let sent = self.resolve_sender.send_if_modified(|current| { - if current.is_some() { - return false; - } - *current = Some(resource.clone()); - true - }); - - if !sent { - return Err(LazyResourceError::ResourceAlreadyProvided); - } - - tracing::info!("Lazy resource {} has been provided", T::name()); - - Ok(()) - } -} - -#[derive(Debug, Error, PartialEq)] -pub enum LazyResourceError { - #[error("Node is shutting down")] - NodeShutdown, - #[error("Resource is already provided")] - ResourceAlreadyProvided, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[derive(Debug, Clone, PartialEq)] - struct TestResource(Arc); - - impl Resource for TestResource { - fn name() -> String { - "test_resource".into() - } - } - - struct TestContext { - test_resource: TestResource, - lazy_resource: LazyResource, - stop_sender: watch::Sender, - } - - impl TestContext { - fn new() -> Self { - let (stop_sender, stop_receiver) = watch::channel(false); - Self { - test_resource: TestResource(Arc::new(1)), - lazy_resource: LazyResource::::new(StopReceiver(stop_receiver)), - stop_sender, - } - } - } - - #[tokio::test] - async fn test_already_provided_resource_case() { - let TestContext { - test_resource, - lazy_resource, - stop_sender: _, - } = TestContext::new(); - - lazy_resource - .clone() - .provide(test_resource.clone()) - .await - .unwrap(); - - assert_eq!( - lazy_resource.clone().provide(test_resource.clone()).await, - Err(LazyResourceError::ResourceAlreadyProvided), - "Incorrect result for providing same resource twice" - ); - } - - #[tokio::test] - async fn test_successful_resolve_case() { - let TestContext { - test_resource, - lazy_resource, - stop_sender: _, - } = TestContext::new(); - - lazy_resource - .clone() - .provide(test_resource.clone()) - .await - .unwrap(); - - assert_eq!( - lazy_resource.clone().resolve().await, - Ok(test_resource.clone()), - "Incorrect result for resolving the resource before node shutdown" - ); - } - - #[tokio::test] - async fn test_node_shutdown_case() { - let TestContext { - test_resource: _, - lazy_resource, - stop_sender, - } = TestContext::new(); - - let resolve_task = tokio::spawn(async move { lazy_resource.resolve().await }); - - 
stop_sender.send(true).unwrap();
-
-        let result = resolve_task.await.unwrap();
-
-        assert_eq!(
-            result,
-            Err(LazyResourceError::NodeShutdown),
-            "Incorrect result for resolving the resource after the node shutdown"
-        );
-    }
-}
diff --git a/core/node/node_framework/src/resource/mod.rs b/core/node/node_framework/src/resource/mod.rs
index cf000acf8bb0..5ddc7ba2e45e 100644
--- a/core/node/node_framework/src/resource/mod.rs
+++ b/core/node/node_framework/src/resource/mod.rs
@@ -1,12 +1,7 @@
 use std::{any::TypeId, fmt};
 
-pub use self::{
-    lazy_resource::LazyResource, resource_collection::ResourceCollection, resource_id::ResourceId,
-    unique::Unique,
-};
+pub use self::{resource_id::ResourceId, unique::Unique};
 
-mod lazy_resource;
-mod resource_collection;
 mod resource_id;
 mod unique;
 
@@ -14,9 +9,39 @@ mod unique;
 /// Typically, the type that implements this trait also should implement `Clone`
 /// since the same resource may be requested by several tasks and thus it would be an additional
 /// bound on most methods that work with [`Resource`].
+///
+/// # Example
+///
+/// ```
+/// # use zksync_node_framework::resource::Resource;
+/// # use std::sync::Arc;
+///
+/// /// An abstract interface you want to share.
+/// /// Normally you want the interface to be thread-safe.
+/// trait MyInterface: 'static + Send + Sync {
+///     fn do_something(&self);
+/// }
+///
+/// /// Resource wrapper.
+/// #[derive(Clone)]
+/// struct MyResource(Arc<dyn MyInterface>);
+///
+/// impl Resource for MyResource {
+///     fn name() -> String {
+///         // It is a helpful practice to follow a structured naming pattern for resource names.
+///         // For example, you can use a certain prefix for all resources related to some component, e.g. `api`.
+///         "common/my_resource".to_string()
+///     }
+/// }
+/// ```
 pub trait Resource: 'static + Send + Sync + std::any::Any {
+    /// Invoked after the wiring phase of the service is done.
+    /// Can be used to perform additional resource preparation, knowing that the resource
+    /// is guaranteed to be requested by all the tasks that need it.
     fn on_resource_wired(&mut self) {}
 
+    /// Returns the name of the resource.
+    /// Used for logging purposes.
     fn name() -> String;
 }
 
@@ -26,10 +51,10 @@ pub trait Resource: 'static + Send + Sync + std::any::Any {
 /// This trait is implemented for any type that implements [`Resource`], so there is no need to
 /// implement it manually.
 pub(crate) trait StoredResource: 'static + std::any::Any + Send + Sync {
-    /// An object-safe version of [`Resource::resource_id`].
+    /// An object-safe version of [`Resource::name`].
     fn stored_resource_id(&self) -> ResourceId;
 
-    /// An object-safe version of [`Resource::on_resoure_wired`].
+    /// An object-safe version of [`Resource::on_resource_wired`].
     fn stored_resource_wired(&mut self);
 }
diff --git a/core/node/node_framework/src/resource/resource_collection.rs b/core/node/node_framework/src/resource/resource_collection.rs
deleted file mode 100644
index 7f867f236d95..000000000000
--- a/core/node/node_framework/src/resource/resource_collection.rs
+++ /dev/null
@@ -1,172 +0,0 @@
-use std::{
-    fmt,
-    sync::{Arc, Mutex},
-};
-
-use thiserror::Error;
-use tokio::sync::watch;
-
-use super::Resource;
-
-/// Collection of resources that can be extended during the initialization phase, and then resolved once
-/// the wiring is complete.
-///
-/// During component initialization, resource collections can be requested by the components in order to push new
-/// elements there.
Once the initialization is complete, it is no longer possible to push new elements, and the -/// collection can be resolved into a vector of resources. -/// -/// Collections implement `Clone`, so they can be consumed by several tasks. Every task that resolves the collection -/// is guaranteed to have the same set of resources. -/// -/// The purpose of this container is to allow different tasks to register their resource in a single place for some -/// other task to consume. For example, tasks may register their healthchecks, and then healthcheck task will observe -/// all the provided healthchecks. -pub struct ResourceCollection { - /// Collection of the resources. - resources: Arc>>, - /// Sender indicating that the wiring is complete. - wiring_complete_sender: Arc>, - /// Flag indicating that the collection has been resolved. - wired: watch::Receiver, -} - -impl Resource for ResourceCollection { - fn on_resource_wired(&mut self) { - self.wiring_complete_sender.send(true).ok(); - } - - fn name() -> String { - format!("collection of {}", T::name()) - } -} - -impl Default for ResourceCollection { - fn default() -> Self { - Self::new() - } -} - -impl Clone for ResourceCollection { - fn clone(&self) -> Self { - Self { - resources: self.resources.clone(), - wiring_complete_sender: self.wiring_complete_sender.clone(), - wired: self.wired.clone(), - } - } -} - -impl fmt::Debug for ResourceCollection { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ResourceCollection") - .field("resources", &"{..}") - .finish_non_exhaustive() - } -} - -#[derive(Debug, Error)] -pub enum ResourceCollectionError { - #[error("Adding resources to the collection is not allowed after the wiring is complete")] - AlreadyWired, -} - -impl ResourceCollection { - pub(crate) fn new() -> Self { - let (wiring_complete_sender, wired) = watch::channel(false); - Self { - resources: Arc::default(), - wiring_complete_sender: Arc::new(wiring_complete_sender), - wired, - } - } - - /// Adds a new element to the resource collection. - /// Returns an error if the wiring is already complete. - pub fn push(&self, resource: T) -> Result<(), ResourceCollectionError> { - // This check is sufficient, since no task is guaranteed to be running when the value changes. - if *self.wired.borrow() { - return Err(ResourceCollectionError::AlreadyWired); - } - - let mut handle = self.resources.lock().unwrap(); - handle.push(resource); - tracing::info!( - "A new item has been added to the resource collection {}", - Self::name() - ); - Ok(()) - } - - /// Waits until the wiring is complete, and resolves the collection into a vector of resources. - pub async fn resolve(mut self) -> Vec { - // Guaranteed not to hang on server shutdown, since the node will invoke the `on_wiring_complete` before any task - // is actually spawned (per framework rules). For most cases, this check will resolve immediately, unless - // some tasks would spawn something from the `IntoZkSyncTask` impl. 
- self.wired.changed().await.expect("Sender can't be dropped"); - - tracing::info!("Resource collection {} has been resolved", Self::name()); - - let handle = self.resources.lock().unwrap(); - (*handle).clone() - } -} - -#[cfg(test)] -mod tests { - use assert_matches::assert_matches; - use futures::FutureExt; - - use super::*; - - #[derive(Debug, Clone, PartialEq)] - struct TestResource(Arc); - - impl Resource for TestResource { - fn name() -> String { - "test_resource".into() - } - } - - #[test] - fn test_push() { - let collection = ResourceCollection::::new(); - let resource1 = TestResource(Arc::new(1)); - collection.clone().push(resource1.clone()).unwrap(); - - let resource2 = TestResource(Arc::new(2)); - collection.clone().push(resource2.clone()).unwrap(); - - assert_eq!( - *collection.resources.lock().unwrap(), - vec![resource1, resource2] - ); - } - - #[test] - fn test_already_wired() { - let mut collection = ResourceCollection::::new(); - let resource = TestResource(Arc::new(1)); - - let rc_clone = collection.clone(); - - collection.on_resource_wired(); - - assert_matches!( - rc_clone.push(resource), - Err(ResourceCollectionError::AlreadyWired) - ); - } - - #[test] - fn test_resolve() { - let mut collection = ResourceCollection::::new(); - let result = collection.clone().resolve().now_or_never(); - - assert!(result.is_none()); - - collection.on_resource_wired(); - - let resolved = collection.resolve().now_or_never(); - assert_eq!(resolved.unwrap(), vec![]); - } -} diff --git a/core/node/node_framework/src/resource/unique.rs b/core/node/node_framework/src/resource/unique.rs index 9a256d8f55f3..5c9bdcfe0e12 100644 --- a/core/node/node_framework/src/resource/unique.rs +++ b/core/node/node_framework/src/resource/unique.rs @@ -29,6 +29,7 @@ impl Unique { } /// Takes the resource from the container. + /// Will return `None` if the resource was already taken. pub fn take(&self) -> Option { let result = self.inner.lock().unwrap().take(); diff --git a/core/node/node_framework/src/service/context.rs b/core/node/node_framework/src/service/context.rs index 81d094630c32..8197fdfa9d7f 100644 --- a/core/node/node_framework/src/service/context.rs +++ b/core/node/node_framework/src/service/context.rs @@ -1,15 +1,17 @@ use std::any::type_name; +use super::shutdown_hook::ShutdownHook; use crate::{ - precondition::Precondition, resource::{Resource, ResourceId, StoredResource}, - service::ZkStackService, - task::{OneshotTask, Task, UnconstrainedOneshotTask, UnconstrainedTask}, + service::{named_future::NamedFuture, ZkStackService}, + task::Task, wiring_layer::WiringError, }; -/// An interface to the service's resources provided to the tasks during initialization. -/// Provides the ability to fetch required resources, and also gives access to the Tokio runtime handle. +/// An interface to the service provided to the tasks during initialization. +/// This the main point of interaction between with the service. +/// +/// The context provides access to the runtime, resources, and allows adding new tasks. #[derive(Debug)] pub struct ServiceContext<'a> { layer: &'a str, @@ -17,16 +19,26 @@ pub struct ServiceContext<'a> { } impl<'a> ServiceContext<'a> { + /// Instantiates a new context. + /// The context keeps information about the layer that created it for reporting purposes. pub(super) fn new(layer: &'a str, service: &'a mut ZkStackService) -> Self { Self { layer, service } } /// Provides access to the runtime used by the service. + /// /// Can be used to spawn additional tasks within the same runtime. 
-    /// If some tasks stores the handle to spawn additional tasks, it is expected to do all the required
+    /// If some task stores the handle to spawn additional tasks, it is expected to do all the required
     /// cleanup.
     ///
-    /// In most cases, however, it is recommended to use [`add_task`] method instead.
+    /// In most cases, however, it is recommended to use [`add_task`](ServiceContext::add_task) or its alternative
+    /// instead.
+    ///
+    /// ## Note
+    ///
+    /// While `tokio::spawn` and `tokio::spawn_blocking` will work as well, using the runtime handle
+    /// from the context is still the recommended way to get access to the runtime, as it tracks the access
+    /// to the runtime by layers.
     pub fn runtime_handle(&self) -> &tokio::runtime::Handle {
         tracing::info!(
             "Layer {} has requested access to the Tokio runtime",
@@ -36,73 +48,42 @@ impl<'a> ServiceContext<'a> {
     }
 
     /// Adds a task to the service.
+    ///
     /// Added tasks will be launched after the wiring process is finished and all the preconditions
     /// are met.
-    pub fn add_task(&mut self, task: Box<dyn Task>) -> &mut Self {
+    pub fn add_task<T: Task>(&mut self, task: T) -> &mut Self {
         tracing::info!("Layer {} has added a new task: {}", self.layer, task.id());
-        self.service.runnables.tasks.push(task);
-        self
-    }
-
-    /// Adds an unconstrained task to the service.
-    /// Unconstrained tasks will be launched immediately after the wiring process is finished.
-    pub fn add_unconstrained_task(&mut self, task: Box<dyn UnconstrainedTask>) -> &mut Self {
-        tracing::info!(
-            "Layer {} has added a new unconstrained task: {}",
-            self.layer,
-            task.id()
-        );
-        self.service.runnables.unconstrained_tasks.push(task);
-        self
-    }
-
-    /// Adds a precondition to the service.
-    pub fn add_precondition(&mut self, precondition: Box<dyn Precondition>) -> &mut Self {
-        tracing::info!(
-            "Layer {} has added a new precondition: {}",
-            self.layer,
-            precondition.id()
-        );
-        self.service.runnables.preconditions.push(precondition);
+        self.service.runnables.tasks.push(Box::new(task));
         self
     }
 
-    /// Adds an oneshot task to the service.
-    pub fn add_oneshot_task(&mut self, task: Box<dyn OneshotTask>) -> &mut Self {
-        tracing::info!(
-            "Layer {} has added a new oneshot task: {}",
-            self.layer,
-            task.id()
-        );
-        self.service.runnables.oneshot_tasks.push(task);
-        self
-    }
-
-    /// Adds an unconstrained oneshot task to the service.
-    pub fn add_unconstrained_oneshot_task(
-        &mut self,
-        task: Box<dyn UnconstrainedOneshotTask>,
-    ) -> &mut Self {
+    /// Adds a future to be invoked after node shutdown.
+    /// May be used to perform cleanup tasks.
+    ///
+    /// The future is guaranteed to only be polled after all the node tasks are stopped or timed out.
+    /// All the futures will be awaited sequentially.
+    pub fn add_shutdown_hook(&mut self, hook: ShutdownHook) -> &mut Self {
         tracing::info!(
-            "Layer {} has added a new unconstrained oneshot task: {}",
+            "Layer {} has added a new shutdown hook: {}",
             self.layer,
-            task.id()
+            hook.id
         );
         self.service
             .runnables
-            .unconstrained_oneshot_tasks
-            .push(task);
+            .shutdown_hooks
+            .push(NamedFuture::new(hook.future, hook.id));
         self
     }
 
-    /// Attempts to retrieve the resource with the specified name.
-    /// Internally the resources are stored as [`std::any::Any`], and this method does the downcasting
-    /// on behalf of the caller.
+    /// Attempts to retrieve the resource of the specified type.
     ///
     /// ## Panics
     ///
-    /// Panics if the resource with the specified name exists, but is not of the requested type.
-    pub async fn get_resource<T: Resource + Clone>(&mut self) -> Result<T, WiringError> {
+    /// Panics if the resource with the specified [`ResourceId`] exists, but is not of the requested type.
+    pub fn get_resource<T: Resource + Clone>(&mut self) -> Result<T, WiringError> {
+        // Implementation details:
+        // Internally the resources are stored as [`std::any::Any`], and this method does the downcasting
+        // on behalf of the caller.
         #[allow(clippy::borrowed_box)]
         let downcast_clone = |resource: &Box<dyn StoredResource>| {
             resource
@@ -143,13 +124,13 @@ impl<'a> ServiceContext<'a> {
         })
     }
 
-    /// Attempts to retrieve the resource with the specified name.
+    /// Attempts to retrieve the resource of the specified type.
     /// If the resource is not available, it is created using the provided closure.
-    pub async fn get_resource_or_insert_with<T: Resource + Clone, F: FnOnce() -> T>(
+    pub fn get_resource_or_insert_with<T: Resource + Clone, F: FnOnce() -> T>(
         &mut self,
         f: F,
     ) -> T {
-        if let Ok(resource) = self.get_resource::<T>().await {
+        if let Ok(resource) = self.get_resource::<T>() {
             return resource;
         }
@@ -166,18 +147,19 @@ impl<'a> ServiceContext<'a> {
         resource
     }
 
-    /// Attempts to retrieve the resource with the specified name.
+    /// Attempts to retrieve the resource of the specified type.
     /// If the resource is not available, it is created using `T::default()`.
-    pub async fn get_resource_or_default<T: Resource + Clone + Default>(&mut self) -> T {
-        self.get_resource_or_insert_with(T::default).await
+    pub fn get_resource_or_default<T: Resource + Clone + Default>(&mut self) -> T {
+        self.get_resource_or_insert_with(T::default)
     }
 
     /// Adds a resource to the service.
-    /// If the resource with the same name is already provided, the method will return an error.
+    ///
+    /// If the resource with the same type is already provided, the method will return an error.
     pub fn insert_resource<T: Resource>(&mut self, resource: T) -> Result<(), WiringError> {
         let id = ResourceId::of::<T>();
         if self.service.resources.contains_key(&id) {
-            tracing::warn!(
+            tracing::info!(
                 "Layer {} has attempted to provide resource {} of type {}, but it is already available",
                 self.layer,
                 T::name(),
diff --git a/core/node/node_framework/src/service/context_traits.rs b/core/node/node_framework/src/service/context_traits.rs
new file mode 100644
index 000000000000..129bbb1a00f0
--- /dev/null
+++ b/core/node/node_framework/src/service/context_traits.rs
@@ -0,0 +1,133 @@
+use crate::{resource::Resource, service::context::ServiceContext, wiring_layer::WiringError};
+
+/// Trait used as input for wiring layers, aiming to provide all the resources the layer needs for wiring.
+///
+/// For most cases, the most convenient way to implement this trait is to use `#[derive(FromContext)]`.
+/// Otherwise, the trait has several blanket implementations (including the implementation for `()` and `Option<T>`).
+///
+/// # Example
+///
+/// ```
+/// use zksync_node_framework::FromContext;
+/// # #[derive(Clone)]
+/// # struct MandatoryResource;
+/// # impl zksync_node_framework::resource::Resource for MandatoryResource { fn name() -> String { "a".into() } }
+/// # #[derive(Clone)]
+/// # struct OptionalResource;
+/// # impl zksync_node_framework::resource::Resource for OptionalResource { fn name() -> String { "b".into() } }
+/// # #[derive(Default, Clone)]
+/// # struct ResourceWithDefault;
+/// # impl zksync_node_framework::resource::Resource for ResourceWithDefault { fn name() -> String { "c".into() } }
+/// #[derive(FromContext)]
+/// struct MyWiringLayerInput {
+///     // The following field _must_ be present in the context.
+///     mandatory_resource: MandatoryResource,
+///     // The following field is optional.
+///     // It will be `None` if there is no such resource in the context.
+///     optional_resource: Option<OptionalResource>,
+///     // The following field is guaranteed to fetch the value from the context.
+///     // In case the value is missing, a default value will be added to the context.
+///     #[context(default)]
+///     resource_with_default: ResourceWithDefault,
+/// }
+/// ```
+pub trait FromContext: Sized {
+    fn from_context(context: &mut ServiceContext<'_>) -> Result<Self, WiringError>;
+}
+
+impl<T: Resource + Clone> FromContext for T {
+    fn from_context(context: &mut ServiceContext<'_>) -> Result<Self, WiringError> {
+        context.get_resource::<T>()
+    }
+}
+
+impl FromContext for () {
+    fn from_context(_context: &mut ServiceContext<'_>) -> Result<Self, WiringError> {
+        Ok(())
+    }
+}
+
+impl<T: FromContext> FromContext for Option<T> {
+    fn from_context(context: &mut ServiceContext<'_>) -> Result<Self, WiringError> {
+        match T::from_context(context) {
+            Ok(inner) => Ok(Some(inner)),
+            Err(WiringError::ResourceLacking { .. }) => Ok(None),
+            Err(err) => Err(err),
+        }
+    }
+}
+
+/// Trait used as output for wiring layers, aiming to provide all the resources and tasks the layer creates.
+///
+/// For most cases, the most convenient way to implement this trait is to use `#[derive(IntoContext)]`.
+/// Otherwise, the trait has several blanket implementations (including the implementation for `()` and `Option<T>`).
+/// Note, however, that due to the lack of specialization, the blanket implementation for `Option<T: Task>` is not
+/// provided. When used in the macro, tasks must be annotated with the `#[context(task)]` attribute.
+///
+/// Note: returning a resource that already exists in the context will result in a wiring error. If you need to provide
+/// a "substitute" resource, request `Option` of it in the `FromContext` implementation to check whether it's already
+/// provided.
+///
+/// # Example
+///
+/// ```
+/// use zksync_node_framework::IntoContext;
+/// # struct MyTask;
+/// # #[async_trait::async_trait]
+/// # impl zksync_node_framework::task::Task for MyTask {
+/// #     fn id(&self) -> zksync_node_framework::TaskId { "a".into() }
+/// #     async fn run(self: Box<Self>, _: zksync_node_framework::StopReceiver) -> anyhow::Result<()> { Ok(()) }
+/// # }
+/// # struct MaybeTask;
+/// # #[async_trait::async_trait]
+/// # impl zksync_node_framework::task::Task for MaybeTask {
+/// #     fn id(&self) -> zksync_node_framework::TaskId { "b".into() }
+/// #     async fn run(self: Box<Self>, _: zksync_node_framework::StopReceiver) -> anyhow::Result<()> { Ok(()) }
+/// # }
+/// # struct MyResource;
+/// # impl zksync_node_framework::resource::Resource for MyResource { fn name() -> String { "a".into() } }
+/// # struct MaybeResource;
+/// # impl zksync_node_framework::resource::Resource for MaybeResource { fn name() -> String { "b".into() } }
+/// #[derive(IntoContext)]
+/// struct MyWiringLayerOutput {
+///     // This resource will be inserted unconditionally.
+///     // Will err if such resource is already present in the context.
+///     resource: MyResource,
+///     // Will only provide the resource if it's `Some`.
+///     maybe_resource: Option<MaybeResource>,
+///     // Will provide task unconditionally.
+///     #[context(task)]
+///     task: MyTask,
+///     // Will provide task only if it's `Some`.
+///     #[context(task)]
+///     maybe_task: Option<MaybeTask>,
+/// }
+/// ```
+pub trait IntoContext {
+    fn into_context(self, context: &mut ServiceContext<'_>) -> Result<(), WiringError>;
+}
+
+// Unfortunately, without specialization we cannot provide a blanket implementation for `T: Task`
+// as well. `Resource` is chosen because it also has a blanket implementation of `FromContext`.
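For illustration (not part of the patch): given the blanket implementations above, `#[derive(FromContext)]` can be thought of as generating roughly the following. The `PoolResource` type and the field names are hypothetical, and the import paths assume the re-exports introduced elsewhere in this diff.

```rust
use zksync_node_framework::{
    resource::Resource, service::ServiceContext, FromContext, WiringError,
};

#[derive(Clone)]
struct PoolResource; // Hypothetical resource, used only for illustration.

impl Resource for PoolResource {
    fn name() -> String {
        "common/pool".to_string()
    }
}

struct LayerInput {
    mandatory: PoolResource,
    optional: Option<PoolResource>,
}

// Roughly what the derive would generate: each field is filled in by
// delegating to the `FromContext` impl of that field's type.
impl FromContext for LayerInput {
    fn from_context(ctx: &mut ServiceContext<'_>) -> Result<Self, WiringError> {
        Ok(Self {
            // Resolved by the blanket impl for `T: Resource + Clone`.
            mandatory: FromContext::from_context(ctx)?,
            // Resolved by the `Option<T>` impl, which maps `ResourceLacking` to `None`.
            optional: FromContext::from_context(ctx)?,
        })
    }
}
```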
+impl<T: Resource> IntoContext for T {
+    fn into_context(self, context: &mut ServiceContext<'_>) -> Result<(), WiringError> {
+        context.insert_resource(self)
+    }
+}
+
+impl IntoContext for () {
+    fn into_context(self, _context: &mut ServiceContext<'_>) -> Result<(), WiringError> {
+        Ok(())
+    }
+}
+
+impl<T: IntoContext> IntoContext for Option<T> {
+    fn into_context(self, context: &mut ServiceContext<'_>) -> Result<(), WiringError> {
+        if let Some(inner) = self {
+            inner.into_context(context)
+        } else {
+            Ok(())
+        }
+    }
+}
diff --git a/core/node/node_framework/src/service/error.rs b/core/node/node_framework/src/service/error.rs
index 173745e74c75..890cc6b7d4b6 100644
--- a/core/node/node_framework/src/service/error.rs
+++ b/core/node/node_framework/src/service/error.rs
@@ -1,5 +1,21 @@
-use crate::wiring_layer::WiringError;
+use crate::{task::TaskId, wiring_layer::WiringError};
 
+/// An error that can occur during the task lifecycle.
+#[derive(Debug, thiserror::Error)]
+pub enum TaskError {
+    #[error("Task {0} failed: {1}")]
+    TaskFailed(TaskId, anyhow::Error),
+    #[error("Task {0} panicked: {1}")]
+    TaskPanicked(TaskId, String),
+    #[error("Shutdown for task {0} timed out")]
+    TaskShutdownTimedOut(TaskId),
+    #[error("Shutdown hook {0} failed: {1}")]
+    ShutdownHookFailed(TaskId, anyhow::Error),
+    #[error("Shutdown hook {0} timed out")]
+    ShutdownHookTimedOut(TaskId),
+}
+
+/// An error that can occur during the service lifecycle.
 #[derive(Debug, thiserror::Error)]
 pub enum ZkStackServiceError {
     #[error("Detected a Tokio Runtime. ZkStackService manages its own runtime and does not support nested runtimes")]
@@ -8,6 +24,6 @@ pub enum ZkStackServiceError {
     NoTasks,
     #[error("One or more wiring layers failed to initialize: {0:?}")]
     Wiring(Vec<(String, WiringError)>),
-    #[error(transparent)]
-    Task(#[from] anyhow::Error),
+    #[error("One or more tasks failed: {0:?}")]
+    Task(Vec<TaskError>),
 }
diff --git a/core/node/node_framework/src/service/mod.rs b/core/node/node_framework/src/service/mod.rs
index 4a504f393c3a..22102a60efb7 100644
--- a/core/node/node_framework/src/service/mod.rs
+++ b/core/node/node_framework/src/service/mod.rs
@@ -1,21 +1,33 @@
 use std::{collections::HashMap, time::Duration};
 
-use anyhow::Context;
-use futures::{future::BoxFuture, FutureExt};
-use tokio::{runtime::Runtime, sync::watch};
+use error::TaskError;
+use futures::future::Fuse;
+use tokio::{runtime::Runtime, sync::watch, task::JoinHandle};
 use zksync_utils::panic_extractor::try_extract_panic_message;
 
-use self::runnables::Runnables;
-pub use self::{context::ServiceContext, error::ZkStackServiceError, stop_receiver::StopReceiver};
+pub use self::{
+    context::ServiceContext,
+    context_traits::{FromContext, IntoContext},
+    error::ZkStackServiceError,
+    shutdown_hook::ShutdownHook,
+    stop_receiver::StopReceiver,
+};
 use crate::{
     resource::{ResourceId, StoredResource},
-    service::runnables::TaskReprs,
-    wiring_layer::{WiringError, WiringLayer},
+    service::{
+        named_future::NamedFuture,
+        runnables::{NamedBoxFuture, Runnables, TaskReprs},
+    },
+    task::TaskId,
+    wiring_layer::{WireFn, WiringError, WiringLayer, WiringLayerExt},
 };
 
 mod context;
+mod context_traits;
 mod error;
+mod named_future;
 mod runnables;
+mod shutdown_hook;
 mod stop_receiver;
 #[cfg(test)]
 mod tests;
@@ -27,7 +39,9 @@ const TASK_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(30);
 #[derive(Default, Debug)]
 pub struct ZkStackServiceBuilder {
     /// List of wiring layers.
-    layers: Vec<Box<dyn WiringLayer>>,
+    // Note: It has to be a `Vec` and not e.g. `HashMap` because the order in which we
+    // iterate through it matters.
+    layers: Vec<(&'static str, WireFn)>,
 }
 
 impl ZkStackServiceBuilder {
@@ -36,6 +50,7 @@ impl ZkStackServiceBuilder {
     }
 
     /// Adds a wiring layer.
+    ///
     /// During the [`run`](ZkStackService::run) call the service will invoke
     /// `wire` method of every layer in the order they were added.
     ///
@@ -44,16 +59,21 @@
     /// This may be useful if the same layer is a prerequisite for multiple other layers: it is safe
     /// to add it multiple times, and it will only be wired once.
     pub fn add_layer<T: WiringLayer>(&mut self, layer: T) -> &mut Self {
+        let name = layer.layer_name();
         if !self
             .layers
             .iter()
-            .any(|existing_layer| existing_layer.layer_name() == layer.layer_name())
+            .any(|(existing_name, _)| name == *existing_name)
         {
-            self.layers.push(Box::new(layer));
+            self.layers.push((name, layer.into_wire_fn()));
         }
         self
     }
 
+    /// Builds the service.
+    ///
+    /// In case of errors during the wiring phase, returns the list of all the errors that happened, in the order
+    /// of their occurrence.
     pub fn build(&mut self) -> Result<ZkStackService, ZkStackServiceError> {
         if tokio::runtime::Handle::try_current().is_ok() {
             return Err(ZkStackServiceError::RuntimeDetected);
         }
@@ -71,6 +91,7 @@ impl ZkStackServiceBuilder {
             runnables: Default::default(),
             stop_sender,
             runtime,
+            errors: Vec::new(),
         })
     }
 }
@@ -82,7 +103,7 @@ pub struct ZkStackService {
     /// Cache of resources that have been requested at least by one task.
     resources: HashMap<ResourceId, Box<dyn StoredResource>>,
     /// List of wiring layers.
-    layers: Vec<Box<dyn WiringLayer>>,
+    layers: Vec<(&'static str, WireFn)>,
 
     /// Different kinds of tasks for the service.
     runnables: Runnables,
@@ -90,27 +111,53 @@ pub struct ZkStackService {
     stop_sender: watch::Sender<bool>,
     /// Tokio runtime used to spawn tasks.
     runtime: Runtime,
+
+    /// Collector for the task errors met during the service execution.
+    errors: Vec<TaskError>,
 }
 
+type TaskFuture = NamedFuture<Fuse<JoinHandle<anyhow::Result<()>>>>;
+
 impl ZkStackService {
     /// Runs the system.
     pub fn run(mut self) -> Result<(), ZkStackServiceError> {
+        self.wire()?;
+
+        let TaskReprs {
+            tasks,
+            shutdown_hooks,
+        } = self.prepare_tasks();
+
+        let remaining = self.run_tasks(tasks);
+        self.shutdown_tasks(remaining);
+        self.run_shutdown_hooks(shutdown_hooks);
+
+        tracing::info!("Exiting the service");
+        if self.errors.is_empty() {
+            Ok(())
+        } else {
+            Err(ZkStackServiceError::Task(self.errors))
+        }
+    }
+
+    /// Performs wiring of the service.
+    /// After invoking this method, the collected tasks will be stored in `self.runnables`.
+    fn wire(&mut self) -> Result<(), ZkStackServiceError> {
         // Initialize tasks.
         let wiring_layers = std::mem::take(&mut self.layers);
 
         let mut errors: Vec<(String, WiringError)> = Vec::new();
 
         let runtime_handle = self.runtime.handle().clone();
-        for layer in wiring_layers {
-            let name = layer.layer_name().to_string();
+        for (name, WireFn(wire_fn)) in wiring_layers {
             // We must process wiring layers sequentially and in the same order as they were added.
-            let task_result =
-                runtime_handle.block_on(layer.wire(ServiceContext::new(&name, &mut self)));
+            let mut context = ServiceContext::new(name, self);
+            let task_result = wire_fn(&runtime_handle, &mut context);
             if let Err(err) = task_result {
                 // We don't want to bail on the first error, since it'll provide worse DevEx:
                 // People likely want to fix as many problems as they can in one go, rather than have
                 // to fix them one by one.
-                errors.push((name, err));
+                errors.push((name.to_string(), err));
                 continue;
             };
         }
@@ -127,118 +174,130 @@ impl ZkStackService {
             return Err(ZkStackServiceError::NoTasks);
         }
 
-        let only_oneshot_tasks = self.runnables.is_oneshot_only();
+        // Wiring is now complete.
+        for resource in self.resources.values_mut() {
+            resource.stored_resource_wired();
+        }
+        self.resources = HashMap::default(); // Decrement reference counters for resources.
+        tracing::info!("Wiring complete");
+
+        Ok(())
+    }
 
+    /// Prepares collected tasks for running.
+    fn prepare_tasks(&mut self) -> TaskReprs {
         // Barrier that will only be lifted once all the preconditions are met.
         // It will be awaited by the tasks before they start running and by the preconditions once they are fulfilled.
         let task_barrier = self.runnables.task_barrier();
 
         // Collect long-running tasks.
         let stop_receiver = StopReceiver(self.stop_sender.subscribe());
-        let TaskReprs {
-            mut long_running_tasks,
-            oneshot_tasks,
-        } = self
-            .runnables
-            .prepare_tasks(task_barrier.clone(), stop_receiver.clone());
-
-        // Wiring is now complete.
-        for resource in self.resources.values_mut() {
-            resource.stored_resource_wired();
-        }
-        drop(self.resources); // Decrement reference counters for resources.
-        tracing::info!("Wiring complete");
-
-        // Create a system task that is cancellation-aware and will only exit on either oneshot task failure or
-        // stop signal.
-        let oneshot_runner_system_task =
-            oneshot_runner_task(oneshot_tasks, stop_receiver, only_oneshot_tasks);
-        long_running_tasks.push(oneshot_runner_system_task);
+        self.runnables
+            .prepare_tasks(task_barrier.clone(), stop_receiver.clone())
+    }
 
+    /// Spawns the provided tasks and runs them until at least one task exits, then returns the list
+    /// of remaining tasks.
+    /// Adds the error, if any, to the `errors` vector.
+    fn run_tasks(&mut self, tasks: Vec<NamedBoxFuture<anyhow::Result<()>>>) -> Vec<TaskFuture> {
         // Prepare tasks for running.
         let rt_handle = self.runtime.handle().clone();
         let join_handles: Vec<_> = tasks
             .into_iter()
-            .map(|task| rt_handle.spawn(task).fuse())
+            .map(|task| task.spawn(&rt_handle).fuse())
             .collect();
 
+        // Collect names for remaining tasks for reporting purposes.
+        let mut tasks_names: Vec<_> = join_handles.iter().map(|task| task.id()).collect();
+
         // Run the tasks until one of them exits.
-        let (resolved, _, remaining) = self
+        let (resolved, resolved_idx, remaining) = self
             .runtime
             .block_on(futures::future::select_all(join_handles));
-        let result = match resolved {
-            Ok(Ok(())) => Ok(()),
-            Ok(Err(err)) => Err(err).context("Task failed"),
-            Err(panic_err) => {
-                let panic_msg = try_extract_panic_message(panic_err);
-                Err(anyhow::format_err!(
-                    "One of the tasks panicked: {panic_msg}"
-                ))
-            }
-        };
+        // Extract the result and report it to logs early, before waiting for any other task to shut down.
+        // We will also collect the errors from the remaining tasks, hence a vector.
+        let task_name = tasks_names.swap_remove(resolved_idx);
+        self.handle_task_exit(resolved, task_name);
+        tracing::info!("One of the tasks has exited, shutting down the node");
+
+        remaining
+    }
+
+    /// Sends the stop signal and waits for the remaining tasks to finish.
+    fn shutdown_tasks(&mut self, remaining: Vec<TaskFuture>) {
+        // Send stop signal to remaining tasks and wait for them to finish.
+        self.stop_sender.send(true).ok();
+        // Collect names for remaining tasks for reporting purposes.
+        // We have to re-collect, because `select_all` does not guarantee the order of returned remaining futures.
+ let remaining_tasks_names: Vec<_> = remaining.iter().map(|task| task.id()).collect(); let remaining_tasks_with_timeout: Vec<_> = remaining .into_iter() .map(|task| async { tokio::time::timeout(TASK_SHUTDOWN_TIMEOUT, task).await }) .collect(); - // Send stop signal to remaining tasks and wait for them to finish. - // Given that we are shutting down, we do not really care about returned values. - self.stop_sender.send(true).ok(); let execution_results = self .runtime .block_on(futures::future::join_all(remaining_tasks_with_timeout)); - let execution_timeouts_count = execution_results.iter().filter(|&r| r.is_err()).count(); - if execution_timeouts_count > 0 { - tracing::warn!( - "{execution_timeouts_count} tasks didn't finish in {TASK_SHUTDOWN_TIMEOUT:?} and were dropped" - ); - } else { - tracing::info!("Remaining tasks finished without reaching timeouts"); - } - tracing::info!("Exiting the service"); - result?; - Ok(()) + // Report the results of the remaining tasks. + for (name, result) in remaining_tasks_names.into_iter().zip(execution_results) { + match result { + Ok(resolved) => { + self.handle_task_exit(resolved, name); + } + Err(_) => { + tracing::error!("Task {name} timed out"); + self.errors.push(TaskError::TaskShutdownTimedOut(name)); + } + } + } } -} -fn oneshot_runner_task( - oneshot_tasks: Vec>>, - mut stop_receiver: StopReceiver, - only_oneshot_tasks: bool, -) -> BoxFuture<'static, anyhow::Result<()>> { - Box::pin(async move { - let oneshot_tasks = oneshot_tasks.into_iter().map(|fut| async move { - // Spawn each oneshot task as a separate tokio task. - // This way we can handle the cases when such a task panics and propagate the message - // to the service. - let handle = tokio::runtime::Handle::current(); - match handle.spawn(fut).await { - Ok(Ok(())) => Ok(()), - Ok(Err(err)) => Err(err), - Err(panic_err) => { - let panic_msg = try_extract_panic_message(panic_err); - Err(anyhow::format_err!("Oneshot task panicked: {panic_msg}")) + /// Runs the provided shutdown hooks. + fn run_shutdown_hooks(&mut self, shutdown_hooks: Vec>>) { + // Run shutdown hooks sequentially. + for hook in shutdown_hooks { + let name = hook.id().clone(); + // Limit each shutdown hook to the same timeout as the tasks. + let hook_with_timeout = + async move { tokio::time::timeout(TASK_SHUTDOWN_TIMEOUT, hook).await }; + match self.runtime.block_on(hook_with_timeout) { + Ok(Ok(())) => { + tracing::info!("Shutdown hook {name} completed"); + } + Ok(Err(err)) => { + tracing::error!("Shutdown hook {name} failed: {err}"); + self.errors.push(TaskError::ShutdownHookFailed(name, err)); + } + Err(_) => { + tracing::error!("Shutdown hook {name} timed out"); + self.errors.push(TaskError::ShutdownHookTimedOut(name)); } } - }); + } + } - match futures::future::try_join_all(oneshot_tasks).await { - Err(err) => Err(err), - Ok(_) if only_oneshot_tasks => { - // We only run oneshot tasks in this service, so we can exit now. - Ok(()) + /// Checks the result of the task execution, logs the result, and stores the error if any. + fn handle_task_exit( + &mut self, + task_result: Result, tokio::task::JoinError>, + task_name: TaskId, + ) { + match task_result { + Ok(Ok(())) => { + tracing::info!("Task {task_name} finished"); } - Ok(_) => { - // All oneshot tasks have exited and we have at least one long-running task. - // Simply wait for the stop signal. 
- stop_receiver.0.changed().await.ok(); - Ok(()) + Ok(Err(err)) => { + tracing::error!("Task {task_name} failed: {err}"); + self.errors.push(TaskError::TaskFailed(task_name, err)); } - } - // Note that we don't have to `select` on the stop signal explicitly: - // Each prerequisite is given a stop signal, and if everyone respects it, this future - // will still resolve once the stop signal is received. - }) + Err(panic_err) => { + let panic_msg = try_extract_panic_message(panic_err); + tracing::error!("Task {task_name} panicked: {panic_msg}"); + self.errors + .push(TaskError::TaskPanicked(task_name, panic_msg)); + } + }; + } } diff --git a/core/node/node_framework/src/service/named_future.rs b/core/node/node_framework/src/service/named_future.rs new file mode 100644 index 000000000000..283fbbb327c9 --- /dev/null +++ b/core/node/node_framework/src/service/named_future.rs @@ -0,0 +1,69 @@ +use std::{fmt, future::Future, pin::Pin, task}; + +use futures::future::{Fuse, FutureExt}; +use pin_project_lite::pin_project; +use tokio::task::JoinHandle; + +use crate::task::TaskId; + +pin_project! { + /// Implements a future with the name tag attached. + pub struct NamedFuture { + #[pin] + inner: F, + name: TaskId, + } +} + +impl NamedFuture +where + F: Future + Send + 'static, + F::Output: Send + 'static, +{ + /// Creates a new future with the name tag attached. + pub fn new(inner: F, name: TaskId) -> Self { + Self { inner, name } + } + + /// Returns the ID of the task attached to the future. + pub fn id(&self) -> TaskId { + self.name.clone() + } + + /// Fuses the wrapped future. + pub fn fuse(self) -> NamedFuture> { + NamedFuture { + name: self.name, + inner: self.inner.fuse(), + } + } + + /// Spawns the wrapped future on the provided runtime handle. + /// Returns a named wrapper over the join handle. + pub fn spawn(self, handle: &tokio::runtime::Handle) -> NamedFuture> { + NamedFuture { + name: self.name, + inner: handle.spawn(self.inner), + } + } +} + +impl Future for NamedFuture +where + F: Future, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> task::Poll { + tracing::info_span!("NamedFuture", name = %self.name) + .in_scope(|| self.project().inner.poll(cx)) + } +} + +impl fmt::Debug for NamedFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NamedFuture") + .field("name", &self.name) + .finish_non_exhaustive() + } +} diff --git a/core/node/node_framework/src/service/runnables.rs b/core/node/node_framework/src/service/runnables.rs index 7f35e384d6cc..c3a7c21d2e80 100644 --- a/core/node/node_framework/src/service/runnables.rs +++ b/core/node/node_framework/src/service/runnables.rs @@ -1,64 +1,45 @@ use std::{fmt, sync::Arc}; use anyhow::Context as _; -use futures::future::BoxFuture; +use futures::{future::BoxFuture, FutureExt as _}; use tokio::sync::Barrier; +use zksync_utils::panic_extractor::try_extract_panic_message; -use super::StopReceiver; -use crate::{ - precondition::Precondition, - task::{OneshotTask, Task, UnconstrainedOneshotTask, UnconstrainedTask}, -}; +use super::{named_future::NamedFuture, StopReceiver}; +use crate::task::{Task, TaskKind}; + +/// Alias for futures with the name assigned. +pub(crate) type NamedBoxFuture = NamedFuture>; /// A collection of different flavors of tasks. #[derive(Default)] pub(super) struct Runnables { - /// Preconditions added to the service. - pub(super) preconditions: Vec>, /// Tasks added to the service. pub(super) tasks: Vec>, - /// Oneshot tasks added to the service. 
- pub(super) oneshot_tasks: Vec>, - /// Unconstrained tasks added to the service. - pub(super) unconstrained_tasks: Vec>, - /// Unconstrained oneshot tasks added to the service. - pub(super) unconstrained_oneshot_tasks: Vec>, + /// List of hooks to be invoked after node shutdown. + pub(super) shutdown_hooks: Vec>>, } impl fmt::Debug for Runnables { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Macro that iterates over a `Vec`, invokes `.id()` method and collects the results into a `Vec`. - // Returns a reference to created `Vec` to satisfy the `.field` method signature. - macro_rules! ids { - ($vec:expr) => { - &$vec.iter().map(|x| x.id()).collect::>() - }; - } - f.debug_struct("Runnables") - .field("preconditions", ids!(self.preconditions)) - .field("tasks", ids!(self.tasks)) - .field("oneshot_tasks", ids!(self.oneshot_tasks)) - .field("unconstrained_tasks", ids!(self.unconstrained_tasks)) - .field( - "unconstrained_oneshot_tasks", - ids!(self.unconstrained_oneshot_tasks), - ) + .field("tasks", &self.tasks) + .field("shutdown_hooks", &self.shutdown_hooks) .finish() } } /// A unified representation of tasks that can be run by the service. pub(super) struct TaskReprs { - pub(super) long_running_tasks: Vec>>, - pub(super) oneshot_tasks: Vec>>, + pub(super) tasks: Vec>>, + pub(super) shutdown_hooks: Vec>>, } impl fmt::Debug for TaskReprs { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TaskReprs") - .field("long_running_tasks", &self.long_running_tasks.len()) - .field("oneshot_tasks", &self.oneshot_tasks.len()) + .field("long_running_tasks", &self.tasks.len()) + .field("shutdown_hooks", &self.shutdown_hooks.len()) .finish() } } @@ -69,149 +50,104 @@ impl Runnables { pub(super) fn is_empty(&self) -> bool { // We don't consider preconditions to be tasks. self.tasks.is_empty() - && self.oneshot_tasks.is_empty() - && self.unconstrained_tasks.is_empty() - && self.unconstrained_oneshot_tasks.is_empty() - } - - /// Returns `true` if there are no long-running tasks in the collection. - pub(super) fn is_oneshot_only(&self) -> bool { - self.tasks.is_empty() && self.unconstrained_tasks.is_empty() } /// Prepares a barrier that should be shared between tasks and preconditions. /// The barrier is configured to wait for all the participants to be ready. /// Barrier does not assume the existence of unconstrained tasks. pub(super) fn task_barrier(&self) -> Arc { - Arc::new(Barrier::new( - self.tasks.len() + self.preconditions.len() + self.oneshot_tasks.len(), - )) + let barrier_size = self + .tasks + .iter() + .filter(|t| { + matches!( + t.kind(), + TaskKind::Precondition | TaskKind::OneshotTask | TaskKind::Task + ) + }) + .count(); + Arc::new(Barrier::new(barrier_size)) } /// Transforms the collection of tasks into a set of universal futures. 
pub(super) fn prepare_tasks( - mut self, + &mut self, task_barrier: Arc, stop_receiver: StopReceiver, ) -> TaskReprs { let mut long_running_tasks = Vec::new(); - self.collect_unconstrained_tasks(&mut long_running_tasks, stop_receiver.clone()); - self.collect_tasks( - &mut long_running_tasks, - task_barrier.clone(), - stop_receiver.clone(), - ); - let mut oneshot_tasks = Vec::new(); - self.collect_preconditions( - &mut oneshot_tasks, - task_barrier.clone(), - stop_receiver.clone(), - ); - self.collect_oneshot_tasks( - &mut oneshot_tasks, - task_barrier.clone(), - stop_receiver.clone(), - ); - self.collect_unconstrained_oneshot_tasks(&mut oneshot_tasks, stop_receiver.clone()); - - TaskReprs { - long_running_tasks, - oneshot_tasks, - } - } - fn collect_unconstrained_tasks( - &mut self, - tasks: &mut Vec>>, - stop_receiver: StopReceiver, - ) { - for task in std::mem::take(&mut self.unconstrained_tasks) { - let name = task.id(); - let stop_receiver = stop_receiver.clone(); - let task_future = Box::pin(async move { - task.run_unconstrained(stop_receiver) - .await - .with_context(|| format!("Task {name} failed")) - }); - tasks.push(task_future); - } - } - - fn collect_tasks( - &mut self, - tasks: &mut Vec>>, - task_barrier: Arc, - stop_receiver: StopReceiver, - ) { for task in std::mem::take(&mut self.tasks) { let name = task.id(); + let kind = task.kind(); let stop_receiver = stop_receiver.clone(); let task_barrier = task_barrier.clone(); - let task_future = Box::pin(async move { - task.run_with_barrier(stop_receiver, task_barrier) - .await - .with_context(|| format!("Task {name} failed")) - }); - tasks.push(task_future); + let task_future: BoxFuture<'static, _> = + Box::pin(task.run_internal(stop_receiver, task_barrier)); + let named_future = NamedFuture::new(task_future, name); + if kind.is_oneshot() { + oneshot_tasks.push(named_future); + } else { + long_running_tasks.push(named_future); + } } - } - fn collect_preconditions( - &mut self, - oneshot_tasks: &mut Vec>>, - task_barrier: Arc, - stop_receiver: StopReceiver, - ) { - for precondition in std::mem::take(&mut self.preconditions) { - let name = precondition.id(); - let stop_receiver = stop_receiver.clone(); - let task_barrier = task_barrier.clone(); - let task_future = Box::pin(async move { - precondition - .check_with_barrier(stop_receiver, task_barrier) - .await - .with_context(|| format!("Precondition {name} failed")) - }); - oneshot_tasks.push(task_future); - } - } + let only_oneshot_tasks = long_running_tasks.is_empty(); + // Create a system task that is cancellation-aware and will only exit on either oneshot task failure or + // stop signal. 
+ let oneshot_runner_system_task = + oneshot_runner_task(oneshot_tasks, stop_receiver, only_oneshot_tasks); + long_running_tasks.push(oneshot_runner_system_task); - fn collect_oneshot_tasks( - &mut self, - oneshot_tasks: &mut Vec>>, - task_barrier: Arc, - stop_receiver: StopReceiver, - ) { - for oneshot_task in std::mem::take(&mut self.oneshot_tasks) { - let name = oneshot_task.id(); - let stop_receiver = stop_receiver.clone(); - let task_barrier = task_barrier.clone(); - let task_future = Box::pin(async move { - oneshot_task - .run_oneshot_with_barrier(stop_receiver, task_barrier) - .await - .with_context(|| format!("Oneshot task {name} failed")) - }); - oneshot_tasks.push(task_future); + TaskReprs { + tasks: long_running_tasks, + shutdown_hooks: std::mem::take(&mut self.shutdown_hooks), } } +} - fn collect_unconstrained_oneshot_tasks( - &mut self, - oneshot_tasks: &mut Vec>>, - stop_receiver: StopReceiver, - ) { - for unconstrained_oneshot_task in std::mem::take(&mut self.unconstrained_oneshot_tasks) { - let name = unconstrained_oneshot_task.id(); - let stop_receiver = stop_receiver.clone(); - let task_future = Box::pin(async move { - unconstrained_oneshot_task - .run_unconstrained_oneshot(stop_receiver) - .await - .with_context(|| format!("Unconstrained oneshot task {name} failed")) - }); - oneshot_tasks.push(task_future); +fn oneshot_runner_task( + oneshot_tasks: Vec>>, + mut stop_receiver: StopReceiver, + only_oneshot_tasks: bool, +) -> NamedBoxFuture> { + let future = async move { + let oneshot_tasks = oneshot_tasks.into_iter().map(|fut| async move { + // Spawn each oneshot task as a separate tokio task. + // This way we can handle the cases when such a task panics and propagate the message + // to the service. + let handle = tokio::runtime::Handle::current(); + let name = fut.id().to_string(); + match handle.spawn(fut).await { + Ok(Ok(())) => Ok(()), + Ok(Err(err)) => Err(err).with_context(|| format!("Oneshot task {name} failed")), + Err(panic_err) => { + let panic_msg = try_extract_panic_message(panic_err); + Err(anyhow::format_err!( + "Oneshot task {name} panicked: {panic_msg}" + )) + } + } + }); + + match futures::future::try_join_all(oneshot_tasks).await { + Err(err) => Err(err), + Ok(_) if only_oneshot_tasks => { + // We only run oneshot tasks in this service, so we can exit now. + Ok(()) + } + Ok(_) => { + // All oneshot tasks have exited and we have at least one long-running task. + // Simply wait for the stop signal. + stop_receiver.0.changed().await.ok(); + Ok(()) + } } - } + // Note that we don't have to `select` on the stop signal explicitly: + // Each prerequisite is given a stop signal, and if everyone respects it, this future + // will still resolve once the stop signal is received. + }; + + NamedBoxFuture::new(future.boxed(), "oneshot_runner".into()) } diff --git a/core/node/node_framework/src/service/shutdown_hook.rs b/core/node/node_framework/src/service/shutdown_hook.rs new file mode 100644 index 000000000000..caeb26809bde --- /dev/null +++ b/core/node/node_framework/src/service/shutdown_hook.rs @@ -0,0 +1,47 @@ +use std::{fmt, future::Future}; + +use futures::{future::BoxFuture, FutureExt}; + +use crate::{IntoContext, TaskId}; + +/// A named future that will be invoked after all the tasks are stopped. +/// The future is expected to perform a cleanup or a shutdown of the service. +/// +/// All the shutdown hooks will be executed sequentially, so they may assume that +/// no other tasks are running at the moment of execution on the same node. 
However,
+/// a unique access to the database is not guaranteed, since the node may run in a
+/// distributed mode, so this should not be used for potentially destructive actions.
+pub struct ShutdownHook {
+    pub(crate) id: TaskId,
+    pub(crate) future: BoxFuture<'static, anyhow::Result<()>>,
+}
+
+impl fmt::Debug for ShutdownHook {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("ShutdownHook")
+            .field("name", &self.id)
+            .finish()
+    }
+}
+
+impl ShutdownHook {
+    pub fn new(
+        name: &'static str,
+        hook: impl Future<Output = anyhow::Result<()>> + Send + 'static,
+    ) -> Self {
+        Self {
+            id: name.into(),
+            future: hook.boxed(),
+        }
+    }
+}
+
+impl IntoContext for ShutdownHook {
+    fn into_context(
+        self,
+        context: &mut super::ServiceContext<'_>,
+    ) -> Result<(), crate::WiringError> {
+        context.add_shutdown_hook(self);
+        Ok(())
+    }
+}
diff --git a/core/node/node_framework/src/service/stop_receiver.rs b/core/node/node_framework/src/service/stop_receiver.rs
index 7a181b49a80d..e174cf62ba36 100644
--- a/core/node/node_framework/src/service/stop_receiver.rs
+++ b/core/node/node_framework/src/service/stop_receiver.rs
@@ -8,9 +8,3 @@ use tokio::sync::watch;
 /// and prevent tasks from hanging by accident.
 #[derive(Debug, Clone)]
 pub struct StopReceiver(pub watch::Receiver<bool>);
-
-impl StopReceiver {
-    pub fn new(receiver: watch::Receiver<bool>) -> Self {
-        Self(receiver)
-    }
-}
diff --git a/core/node/node_framework/src/service/tests.rs b/core/node/node_framework/src/service/tests.rs
index b5bcc3aaa255..e801e97b7e96 100644
--- a/core/node/node_framework/src/service/tests.rs
+++ b/core/node/node_framework/src/service/tests.rs
@@ -5,11 +5,9 @@ use assert_matches::assert_matches;
 use tokio::{runtime::Runtime, sync::Barrier};
 
 use crate::{
-    service::{
-        ServiceContext, StopReceiver, WiringError, WiringLayer, ZkStackServiceBuilder,
-        ZkStackServiceError,
-    },
+    service::{StopReceiver, WiringError, WiringLayer, ZkStackServiceBuilder, ZkStackServiceError},
     task::{Task, TaskId},
+    IntoContext,
 };
 
 // `ZkStack` Service's `new()` method has to have a check for nested runtime.
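For illustration (not part of the patch), a minimal sketch of constructing a `ShutdownHook`; the hook name and the cleanup body are made up. Because the struct implements `IntoContext`, a wiring layer can return it directly as (part of) its output.

```rust
use zksync_node_framework::service::ShutdownHook;

// Builds a hook that runs after every task has stopped (or timed out).
// The name shows up in logs and in the `TaskError::ShutdownHook*` variants.
// Hooks are awaited sequentially and are subject to the same 30-second
// shutdown timeout as tasks, so the work done here should be short.
fn flush_metrics_hook() -> ShutdownHook {
    ShutdownHook::new("flush_metrics", async {
        // Hypothetical cleanup work would go here.
        Ok(())
    })
}
```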
@@ -30,11 +28,14 @@ struct DefaultLayer { #[async_trait::async_trait] impl WiringLayer for DefaultLayer { + type Input = (); + type Output = (); + fn layer_name(&self) -> &'static str { self.name } - async fn wire(self: Box, mut _node: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { Ok(()) } } @@ -87,11 +88,14 @@ struct WireErrorLayer; #[async_trait::async_trait] impl WiringLayer for WireErrorLayer { + type Input = (); + type Output = (); + fn layer_name(&self) -> &'static str { "wire_error_layer" } - async fn wire(self: Box, _node: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { Err(WiringError::Internal(anyhow!("wiring error"))) } } @@ -110,15 +114,24 @@ fn test_run_with_error_tasks() { #[derive(Debug)] struct TaskErrorLayer; +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +struct TaskErrorLayerOutput { + #[context(task)] + task: ErrorTask, +} + #[async_trait::async_trait] impl WiringLayer for TaskErrorLayer { + type Input = (); + type Output = TaskErrorLayerOutput; + fn layer_name(&self) -> &'static str { "task_error_layer" } - async fn wire(self: Box, mut node: ServiceContext<'_>) -> Result<(), WiringError> { - node.add_task(Box::new(ErrorTask)); - Ok(()) + async fn wire(self, _input: Self::Input) -> Result { + Ok(TaskErrorLayerOutput { task: ErrorTask }) } } @@ -150,25 +163,32 @@ struct TasksLayer { remaining_task_was_run: Arc>, } +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +struct TasksLayerOutput { + #[context(task)] + successful_task: SuccessfulTask, + #[context(task)] + remaining_task: RemainingTask, +} + #[async_trait::async_trait] impl WiringLayer for TasksLayer { + type Input = (); + type Output = TasksLayerOutput; + fn layer_name(&self) -> &'static str { "tasks_layer" } - async fn wire(self: Box, mut node: ServiceContext<'_>) -> Result<(), WiringError> { - // Barrier is needed to make sure that both tasks have started, otherwise the second task - // may exit even before it starts. + async fn wire(self, _input: Self::Input) -> Result { let barrier = Arc::new(Barrier::new(2)); - node.add_task(Box::new(SuccessfulTask( - barrier.clone(), - self.successful_task_was_run.clone(), - ))) - .add_task(Box::new(RemainingTask( - barrier.clone(), - self.remaining_task_was_run.clone(), - ))); - Ok(()) + let successful_task = SuccessfulTask(barrier.clone(), self.successful_task_was_run.clone()); + let remaining_task = RemainingTask(barrier, self.remaining_task_was_run.clone()); + Ok(TasksLayerOutput { + successful_task, + remaining_task, + }) } } diff --git a/core/node/node_framework/src/task.rs b/core/node/node_framework/src/task.rs deleted file mode 100644 index 8ff73d75d8fa..000000000000 --- a/core/node/node_framework/src/task.rs +++ /dev/null @@ -1,191 +0,0 @@ -//! Tasks define the "runnable" concept of the service, e.g. a unit of work that can be executed by the service. -//! -//! ## Task kinds -//! -//! This module defines different flavors of tasks. -//! The most basic one is [`Task`], which is only launched after all the preconditions are met (more on this later), -//! and is expected to run until the node is shut down. This is the most common type of task, e.g. API server, -//! state keeper, and metadata calculator are examples of such tasks. -//! -//! Then there exists an [`OneshotTask`], which has a clear exit condition that does not cause the node to shut down. -//! This is useful for tasks that are expected to run once and then exit, e.g. 
a task that performs a programmatic -//! migration. -//! -//! Finally, the task can be unconstrained by preconditions, which means that it will start immediately without -//! waiting for any preconditions to be met. This kind of tasks is represent by [`UnconstrainedTask`] and -//! [`UnconstrainedOneshotTask`]. -//! -//! ## Tasks and preconditions -//! -//! Besides tasks, service also has a concept of preconditions(crate::precondition::Precondition). Precondition is a -//! piece of logic that is expected to be met before the task can start. One can think of preconditions as a way to -//! express invariants that the tasks may rely on. -//! -//! In this notion, the difference between a task and an unconstrained task is that the former has all the invariants -//! checked already, and unrestricted task is responsible for *manually checking any invariants it may rely on*. -//! -//! The unrestricted tasks are rarely needed, but two common cases for them are: -//! - A task that must be started as soon as possible, e.g. healthcheck server. -//! - A task that may be a driving force for some precondition to be met. - -use std::{ - fmt::{Display, Formatter}, - ops::Deref, - sync::Arc, -}; - -use tokio::sync::Barrier; - -use crate::service::StopReceiver; - -/// A unique human-readable identifier of a task. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct TaskId(String); - -impl TaskId { - pub fn new(value: String) -> Self { - TaskId(value) - } -} - -impl Display for TaskId { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str(&self.0) - } -} - -impl From<&str> for TaskId { - fn from(value: &str) -> Self { - TaskId(value.to_owned()) - } -} - -impl From for TaskId { - fn from(value: String) -> Self { - TaskId(value) - } -} - -impl Deref for TaskId { - type Target = str; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -/// A task implementation. -/// -/// Note: any `Task` added to the service will only start after all the [preconditions](crate::precondition::Precondition) -/// are met. If a task should start immediately, one should use [`UnconstrainedTask`](crate::task::UnconstrainedTask). -#[async_trait::async_trait] -pub trait Task: 'static + Send { - /// Unique name of the task. - fn id(&self) -> TaskId; - - /// Runs the task. - /// - /// Once any of the task returns, the node will shutdown. - /// If the task returns an error, the node will spawn an error-level log message and will return a non-zero - /// exit code. - /// - /// `stop_receiver` argument contains a channel receiver that will change its value once the node requests - /// a shutdown. Every task is expected to either await or periodically check the state of channel and stop - /// its execution once the channel is changed. - /// - /// Each task is expected to perform the required cleanup after receiving the stop signal. - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()>; -} - -impl dyn Task { - /// An internal helper method that guards running the task with a tokio Barrier. - /// Used to make sure that the task is not started until all the preconditions are met. - pub(super) async fn run_with_barrier( - self: Box, - mut stop_receiver: StopReceiver, - preconditions_barrier: Arc, - ) -> anyhow::Result<()> { - // Wait either for barrier to be lifted or for the stop signal to be received. - tokio::select! { - _ = preconditions_barrier.wait() => { - self.run(stop_receiver).await - } - _ = stop_receiver.0.changed() => { - Ok(()) - } - } - } -} - -/// A oneshot task implementation. 
-/// The difference from [`Task`] is that this kind of task may exit without causing the service to shutdown. -/// -/// Note: any `Task` added to the service will only start after all the [preconditions](crate::precondition::Precondition) -/// are met. If a task should start immediately, one should use [`UnconstrainedTask`](crate::task::UnconstrainedTask). -#[async_trait::async_trait] -pub trait OneshotTask: 'static + Send { - /// Unique name of the task. - fn id(&self) -> TaskId; - - /// Runs the task. - /// - /// Unlike [`Task::run`], this method is expected to return once the task is finished, without causing the - /// node to shutdown. - /// - /// `stop_receiver` argument contains a channel receiver that will change its value once the node requests - /// a shutdown. Every task is expected to either await or periodically check the state of channel and stop - /// its execution once the channel is changed. - /// - /// Each task is expected to perform the required cleanup after receiving the stop signal. - async fn run_oneshot(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()>; -} - -impl dyn OneshotTask { - /// An internal helper method that guards running the task with a tokio Barrier. - /// Used to make sure that the task is not started until all the preconditions are met. - pub(super) async fn run_oneshot_with_barrier( - self: Box, - mut stop_receiver: StopReceiver, - preconditions_barrier: Arc, - ) -> anyhow::Result<()> { - // Wait either for barrier to be lifted or for the stop signal to be received. - tokio::select! { - _ = preconditions_barrier.wait() => { - self.run_oneshot(stop_receiver).await - } - _ = stop_receiver.0.changed() => { - Ok(()) - } - } - } -} - -/// A task implementation that is not constrained by preconditions. -/// -/// This trait is used to define tasks that should start immediately after the wiring phase, without waiting for -/// any preconditions to be met. -/// -/// *Warning*. An unconstrained task may not be aware of the state of the node and is expected to cautiously check -/// any invariants it may rely on. -#[async_trait::async_trait] -pub trait UnconstrainedTask: 'static + Send { - /// Unique name of the task. - fn id(&self) -> TaskId; - - /// Runs the task without waiting for any precondition to be met. - async fn run_unconstrained(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()>; -} - -/// An unconstrained analog of [`OneshotTask`]. -/// See [`UnconstrainedTask`] and [`OneshotTask`] for more details. -#[async_trait::async_trait] -pub trait UnconstrainedOneshotTask: 'static + Send { - /// Unique name of the task. - fn id(&self) -> TaskId; - - /// Runs the task without waiting for any precondition to be met. - async fn run_unconstrained_oneshot( - self: Box, - stop_receiver: StopReceiver, - ) -> anyhow::Result<()>; -} diff --git a/core/node/node_framework/src/task/mod.rs b/core/node/node_framework/src/task/mod.rs new file mode 100644 index 000000000000..8113a751441a --- /dev/null +++ b/core/node/node_framework/src/task/mod.rs @@ -0,0 +1,138 @@ +//! Tasks define the "runnable" concept of the service, e.g. a unit of work that can be executed by the service. + +use std::{ + fmt::{self, Formatter}, + sync::Arc, +}; + +use tokio::sync::Barrier; + +pub use self::types::{TaskId, TaskKind}; +use crate::service::StopReceiver; + +mod types; + +/// A task implementation. +/// Task defines the "runnable" concept of the service, e.g. a unit of work that can be executed by the service. 
+///
+/// Based on the task kind, the implementation will be treated differently by the service.
+///
+/// ## Task kinds
+///
+/// There may be different kinds of tasks:
+///
+/// ### `Task`
+///
+/// A regular task. Returning from this task will cause the service to stop. [`Task::kind`] has a default
+/// implementation that returns `TaskKind::Task`.
+///
+/// Typically, the implementation of [`Task::run`] will be some form of loop that runs until either an
+/// irrecoverable error happens (then the task should return an error), or a stop signal is received (then the task
+/// should return `Ok(())`).
+///
+/// ### `OneshotTask`
+///
+/// A task that can exit when completed without causing the service to terminate.
+/// In case of `OneshotTask`s, the service will only exit when all the `OneshotTask`s have exited and there are
+/// no more tasks running.
+///
+/// ### `Precondition`
+///
+/// A "barrier" task that is supposed to check invariants before the main tasks are started.
+/// An example of a precondition task could be a task that checks if the database has all the required data.
+/// Precondition tasks are often paired with some other kind of task that will make sure that the precondition
+/// can be satisfied. This is required for a distributed service setup, where the precondition task will be
+/// present on all the nodes, while a task that satisfies the precondition will be present only on one node.
+///
+/// ### `UnconstrainedTask`
+///
+/// A task that can run without waiting for preconditions.
+/// Tasks of this kind are expected to check all the invariants they rely on themselves.
+/// Usually, this kind of task is used either for tasks that must start as early as possible (e.g. healthcheck server),
+/// or for tasks that cannot rely on preconditions.
+///
+/// ### `UnconstrainedOneshotTask`
+///
+/// A task that can run without waiting for preconditions and can exit without stopping the service.
+/// Usually such tasks may be used for satisfying a precondition, for example, they can perform the database
+/// setup.
+#[async_trait::async_trait]
+pub trait Task: 'static + Send {
+    /// Returns the kind of the task.
+    /// The returned value is expected to be static, and it will be used by the service
+    /// to determine how to handle the task.
+    fn kind(&self) -> TaskKind {
+        TaskKind::Task
+    }
+
+    /// Unique name of the task.
+    fn id(&self) -> TaskId;
+
+    /// Runs the task.
+    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()>;
+}
+
+impl dyn Task {
+    /// An internal helper method that guards running the task with a tokio Barrier.
+    /// Used to make sure that the task is not started until all the preconditions are met.
+    pub(super) async fn run_internal(
+        self: Box<Self>,
+        stop_receiver: StopReceiver,
+        preconditions_barrier: Arc<Barrier>,
+    ) -> anyhow::Result<()> {
+        match self.kind() {
+            TaskKind::Task | TaskKind::OneshotTask => {
+                self.run_with_barrier(stop_receiver, preconditions_barrier)
+                    .await
+            }
+            TaskKind::UnconstrainedTask | TaskKind::UnconstrainedOneshotTask => {
+                self.run(stop_receiver).await
+            }
+            TaskKind::Precondition => {
+                self.check_precondition(stop_receiver, preconditions_barrier)
+                    .await
+            }
+        }
+    }
+
+    async fn run_with_barrier(
+        self: Box<Self>,
+        mut stop_receiver: StopReceiver,
+        preconditions_barrier: Arc<Barrier>,
+    ) -> anyhow::Result<()> {
+        // Wait either for barrier to be lifted or for the stop signal to be received.
+        tokio::select!
{ + _ = preconditions_barrier.wait() => { + self.run(stop_receiver).await + } + _ = stop_receiver.0.changed() => { + Ok(()) + } + } + } + + async fn check_precondition( + self: Box, + mut stop_receiver: StopReceiver, + preconditions_barrier: Arc, + ) -> anyhow::Result<()> { + self.run(stop_receiver.clone()).await?; + tokio::select! { + _ = preconditions_barrier.wait() => { + Ok(()) + } + _ = stop_receiver.0.changed() => { + Ok(()) + } + } + } +} + +impl fmt::Debug for dyn Task { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("Task") + .field("kind", &self.kind()) + .field("name", &self.id()) + .finish() + } +} diff --git a/core/node/node_framework/src/task/types.rs b/core/node/node_framework/src/task/types.rs new file mode 100644 index 000000000000..e9b8b6e37f26 --- /dev/null +++ b/core/node/node_framework/src/task/types.rs @@ -0,0 +1,61 @@ +use std::{ + fmt::{Display, Formatter}, + ops::Deref, +}; + +/// Task kind. +/// See [`Task`](super::Task) documentation for more details. +#[derive(Debug, Clone, Copy)] +#[non_exhaustive] +pub enum TaskKind { + Task, + OneshotTask, + UnconstrainedTask, + UnconstrainedOneshotTask, + Precondition, +} + +impl TaskKind { + pub(crate) fn is_oneshot(self) -> bool { + matches!( + self, + TaskKind::OneshotTask | TaskKind::UnconstrainedOneshotTask | TaskKind::Precondition + ) + } +} + +/// A unique human-readable identifier of a task. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct TaskId(String); + +impl TaskId { + pub fn new(value: String) -> Self { + TaskId(value) + } +} + +impl Display for TaskId { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(&self.0) + } +} + +impl From<&str> for TaskId { + fn from(value: &str) -> Self { + TaskId(value.to_owned()) + } +} + +impl From for TaskId { + fn from(value: String) -> Self { + TaskId(value) + } +} + +impl Deref for TaskId { + type Target = str; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} diff --git a/core/node/node_framework/src/wiring_layer.rs b/core/node/node_framework/src/wiring_layer.rs index e37bb1c9d487..1cc133eea830 100644 --- a/core/node/node_framework/src/wiring_layer.rs +++ b/core/node/node_framework/src/wiring_layer.rs @@ -1,6 +1,24 @@ use std::fmt; -use crate::{resource::ResourceId, service::ServiceContext}; +use tokio::runtime; + +use crate::{resource::ResourceId, service::ServiceContext, FromContext, IntoContext}; + +/// An envelope for the wiring layer function. +/// Since `WiringLayer` has associated types, we cannot easily erase the types via `dyn WiringLayer`, +/// so instead we preserve the layer type within the closure, and represent the actual wiring logic +/// as a function of the service context instead. +/// See [`WiringLayerExt`] trait for more context. +#[allow(clippy::type_complexity)] // False positive, already a dedicated type. +pub(crate) struct WireFn( + pub Box) -> Result<(), WiringError>>, +); + +impl fmt::Debug for WireFn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("WireFn").finish() + } +} /// Wiring layer provides a way to customize the `ZkStackService` by /// adding new tasks or resources to it. @@ -9,22 +27,35 @@ use crate::{resource::ResourceId, service::ServiceContext}; /// which resources they use or add, and the list of tasks they add. #[async_trait::async_trait] pub trait WiringLayer: 'static + Send + Sync { + type Input: FromContext; + type Output: IntoContext; + /// Identifier of the wiring layer. 
 
 /// Wiring layer provides a way to customize the `ZkStackService` by
 /// adding new tasks or resources to it.
@@ -9,22 +27,35 @@ use std::fmt;
 /// which resources they use or add, and the list of tasks they add.
 #[async_trait::async_trait]
 pub trait WiringLayer: 'static + Send + Sync {
+    type Input: FromContext;
+    type Output: IntoContext;
+
     /// Identifier of the wiring layer.
     fn layer_name(&self) -> &'static str;
 
     /// Performs the wiring process, e.g. adds tasks and resources to the node.
     /// This method will be called once during the node initialization.
-    async fn wire(self: Box<Self>, context: ServiceContext<'_>) -> Result<(), WiringError>;
+    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError>;
 }
 
-impl fmt::Debug for dyn WiringLayer {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("WiringLayer")
-            .field("layer_name", &self.layer_name())
-            .finish
+pub(crate) trait WiringLayerExt: WiringLayer {
+    /// Hides the actual type of the wiring layer inside the closure, so that the rest of the application
+    /// doesn't have to know it.
+    fn into_wire_fn(self) -> WireFn
+    where
+        Self: Sized,
+    {
+        WireFn(Box::new(move |rt, ctx| {
+            let input = Self::Input::from_context(ctx)?;
+            let output = rt.block_on(self.wire(input))?;
+            output.into_context(ctx)?;
+            Ok(())
+        }))
     }
 }
 
+impl<T> WiringLayerExt for T where T: WiringLayer {}
+
 /// An error that can occur during the wiring phase.
 #[derive(thiserror::Error, Debug)]
 #[non_exhaustive]
diff --git a/core/node/node_framework/tests/ui.rs b/core/node/node_framework/tests/ui.rs
new file mode 100644
index 000000000000..f2f9697b2c13
--- /dev/null
+++ b/core/node/node_framework/tests/ui.rs
@@ -0,0 +1,11 @@
+#[test]
+fn ui_pass() {
+    let t = trybuild::TestCases::new();
+    t.pass("tests/ui/correct/*.rs");
+}
+
+#[test]
+fn ui_fail() {
+    let t = trybuild::TestCases::new();
+    t.compile_fail("tests/ui/incorrect/*.rs");
+}
diff --git a/core/node/node_framework/tests/ui/correct/01_from_context.rs b/core/node/node_framework/tests/ui/correct/01_from_context.rs
new file mode 100644
index 000000000000..165c53fd0886
--- /dev/null
+++ b/core/node/node_framework/tests/ui/correct/01_from_context.rs
@@ -0,0 +1,41 @@
+#![allow(dead_code)]
+
+use zksync_node_framework::{FromContext, Resource};
+
+#[derive(Clone)]
+struct ResourceA;
+
+impl Resource for ResourceA {
+    fn name() -> String {
+        "a".to_string()
+    }
+}
+
+#[derive(Clone, Default)]
+struct ResourceB;
+
+impl Resource for ResourceB {
+    fn name() -> String {
+        "b".to_string()
+    }
+}
+
+#[derive(FromContext)]
+struct SimpleStruct {
+    _field: ResourceA,
+    _field_2: ResourceB,
+}
+
+#[derive(FromContext)]
+struct StructWithDefault {
+    _field: ResourceA,
+    #[context(default)]
+    _field_default: ResourceB,
+}
+
+#[derive(FromContext)]
+struct StructWithOption {
+    _field: Option<ResourceA>,
+}
+
+fn main() {}
diff --git a/core/node/node_framework/tests/ui/correct/02_into_context.rs b/core/node/node_framework/tests/ui/correct/02_into_context.rs
new file mode 100644
index 000000000000..33104aeea2be
--- /dev/null
+++ b/core/node/node_framework/tests/ui/correct/02_into_context.rs
@@ -0,0 +1,41 @@
+#![allow(dead_code)]
+
+use zksync_node_framework::{IntoContext, Resource, StopReceiver, Task, TaskId};
+
+#[derive(Clone)]
+struct ResourceA;
+
+impl Resource for ResourceA {
+    fn name() -> String {
+        "a".to_string()
+    }
+}
+
+struct TaskA;
+
+#[async_trait::async_trait]
+impl Task for TaskA {
+    fn id(&self) -> TaskId {
+        "batch_status_updater".into()
+    }
+
+    async fn run(self: Box<Self>, _stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        Ok(())
+    }
+}
+
+#[derive(IntoContext)]
+struct SimpleStruct {
+    _field: ResourceA,
+    #[context(task)]
+    _field_2: TaskA,
+}
+
+#[derive(IntoContext)]
+struct Options {
+    _field: Option<ResourceA>,
+    #[context(task)]
+    _field_2: Option<TaskA>,
+}
+
+fn main() {}
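To see the new associated types in action, here is a hedged sketch of a complete layer. The `ExampleLayer` and its input/output structs are invented for illustration, reusing the `ResourceA`/`TaskA` test types from the ui tests above.

```rust
struct ExampleLayer;

// Input: extracted from the service context before `wire()` runs.
#[derive(FromContext)]
struct ExampleInput {
    resource: ResourceA,
}

// Output: inserted back into the context after `wire()` returns.
#[derive(IntoContext)]
struct ExampleOutput {
    #[context(task)]
    task: TaskA,
}

#[async_trait::async_trait]
impl WiringLayer for ExampleLayer {
    type Input = ExampleInput;
    type Output = ExampleOutput;

    fn layer_name(&self) -> &'static str {
        "example_layer"
    }

    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
        let _resource = input.resource; // configure the task from the resource
        Ok(ExampleOutput { task: TaskA })
    }
}
```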
diff --git a/core/node/node_framework/tests/ui/incorrect/01_from_context_task.rs b/core/node/node_framework/tests/ui/incorrect/01_from_context_task.rs
new file mode 100644
index 000000000000..b49347eef00b
--- /dev/null
+++ b/core/node/node_framework/tests/ui/incorrect/01_from_context_task.rs
@@ -0,0 +1,34 @@
+#![allow(dead_code)]
+
+use zksync_node_framework::{FromContext, Resource, StopReceiver, Task, TaskId};
+
+#[derive(Clone)]
+struct ResourceA;
+
+impl Resource for ResourceA {
+    fn name() -> String {
+        "a".to_string()
+    }
+}
+
+struct TaskA;
+
+#[async_trait::async_trait]
+impl Task for TaskA {
+    fn id(&self) -> TaskId {
+        "batch_status_updater".into()
+    }
+
+    async fn run(self: Box<Self>, _stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        Ok(())
+    }
+}
+
+#[derive(FromContext)]
+struct SimpleStruct {
+    _field: ResourceA,
+    #[context(task)]
+    _field_2: TaskA,
+}
+
+fn main() {}
diff --git a/core/node/node_framework/tests/ui/incorrect/01_from_context_task.stderr b/core/node/node_framework/tests/ui/incorrect/01_from_context_task.stderr
new file mode 100644
index 000000000000..52acbc48be19
--- /dev/null
+++ b/core/node/node_framework/tests/ui/incorrect/01_from_context_task.stderr
@@ -0,0 +1,5 @@
+error: `task` attribute is not allowed in `FromContext` macro
+  --> tests/ui/incorrect/01_from_context_task.rs:31:5
+   |
+31 |     _field_2: TaskA,
+   |     ^^^^^^^^
diff --git a/core/node/node_framework/tests/ui/incorrect/02_into_context_default_task.rs b/core/node/node_framework/tests/ui/incorrect/02_into_context_default_task.rs
new file mode 100644
index 000000000000..755605b8151d
--- /dev/null
+++ b/core/node/node_framework/tests/ui/incorrect/02_into_context_default_task.rs
@@ -0,0 +1,35 @@
+#![allow(dead_code)]
+
+use zksync_node_framework::{IntoContext, Resource, StopReceiver, Task, TaskId};
+
+#[derive(Clone)]
+struct ResourceA;
+
+impl Resource for ResourceA {
+    fn name() -> String {
+        "a".to_string()
+    }
+}
+
+#[derive(Default)]
+struct TaskA;
+
+#[async_trait::async_trait]
+impl Task for TaskA {
+    fn id(&self) -> TaskId {
+        "batch_status_updater".into()
+    }
+
+    async fn run(self: Box<Self>, _stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        Ok(())
+    }
+}
+
+#[derive(IntoContext)]
+struct SimpleStruct {
+    _field: ResourceA,
+    #[context(task, default)]
+    _field_2: TaskA,
+}
+
+fn main() {}
diff --git a/core/node/node_framework/tests/ui/incorrect/02_into_context_default_task.stderr b/core/node/node_framework/tests/ui/incorrect/02_into_context_default_task.stderr
new file mode 100644
index 000000000000..b1a751f45dbb
--- /dev/null
+++ b/core/node/node_framework/tests/ui/incorrect/02_into_context_default_task.stderr
@@ -0,0 +1,5 @@
+error: `default` attribute is not allowed in `IntoContext` macro
  --> tests/ui/incorrect/02_into_context_default_task.rs:32:5
   |
32 |     _field_2: TaskA,
   |     ^^^^^^^^
diff --git a/core/node/node_framework/tests/ui/incorrect/03_into_context_default_resource.rs b/core/node/node_framework/tests/ui/incorrect/03_into_context_default_resource.rs
new file mode 100644
index 000000000000..3f815b830eb5
--- /dev/null
+++ b/core/node/node_framework/tests/ui/incorrect/03_into_context_default_resource.rs
@@ -0,0 +1,35 @@
+#![allow(dead_code)]
+
+use zksync_node_framework::{IntoContext, Resource, StopReceiver, Task, TaskId};
+
+#[derive(Clone)]
+struct ResourceA;
+
+impl Resource for ResourceA {
+    fn name() -> String {
+        "a".to_string()
+    }
+}
+
+struct TaskA;
+
+#[async_trait::async_trait]
+impl Task for TaskA {
+    fn id(&self) -> TaskId {
+        "batch_status_updater".into()
+    }
+
+    async fn run(self: Box<Self>, _stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        Ok(())
+    }
+}
+
+#[derive(IntoContext)]
+struct SimpleStruct {
+    #[context(default)]
+    _field: ResourceA,
+    #[context(task)]
+    _field_2: TaskA,
+}
+
+fn main() {}
diff --git a/core/node/node_framework/tests/ui/incorrect/03_into_context_default_resource.stderr b/core/node/node_framework/tests/ui/incorrect/03_into_context_default_resource.stderr
new file mode 100644
index 000000000000..e69da3ad9bba
--- /dev/null
+++ b/core/node/node_framework/tests/ui/incorrect/03_into_context_default_resource.stderr
@@ -0,0 +1,5 @@
+error: `default` attribute is not allowed in `IntoContext` macro
  --> tests/ui/incorrect/03_into_context_default_resource.rs:30:5
   |
30 |     _field: ResourceA,
   |     ^^^^^^
diff --git a/core/node/node_framework/tests/ui/incorrect/04_field_crate_attr.rs b/core/node/node_framework/tests/ui/incorrect/04_field_crate_attr.rs
new file mode 100644
index 000000000000..48c17222333f
--- /dev/null
+++ b/core/node/node_framework/tests/ui/incorrect/04_field_crate_attr.rs
@@ -0,0 +1,48 @@
+#![allow(dead_code)]
+
+use zksync_node_framework::{IntoContext, Resource, StopReceiver, Task, TaskId};
+
+#[derive(Clone)]
+struct ResourceA;
+
+impl Resource for ResourceA {
+    fn name() -> String {
+        "a".to_string()
+    }
+}
+
+struct TaskA;
+
+#[async_trait::async_trait]
+impl Task for TaskA {
+    fn id(&self) -> TaskId {
+        "batch_status_updater".into()
+    }
+
+    async fn run(self: Box<Self>, _stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        Ok(())
+    }
+}
+
+#[derive(IntoContext)]
+struct SimpleStruct {
+    #[context(crate = a)]
+    _field: ResourceA,
+}
+
+#[derive(IntoContext)]
+struct SimpleStruct2 {
+    #[context(crate = b)]
+    _field: ResourceA,
+    #[context(task)]
+    _field_2: TaskA,
+}
+
+#[derive(IntoContext)]
+struct SimpleStruct3 {
+    _field: ResourceA,
+    #[context(task, crate = c)]
+    _field_2: TaskA,
+}
+
+fn main() {}
diff --git a/core/node/node_framework/tests/ui/incorrect/04_field_crate_attr.stderr b/core/node/node_framework/tests/ui/incorrect/04_field_crate_attr.stderr
new file mode 100644
index 000000000000..6346c4cb7e9e
--- /dev/null
+++ b/core/node/node_framework/tests/ui/incorrect/04_field_crate_attr.stderr
@@ -0,0 +1,17 @@
+error: `crate` attribute is not allowed for fields
  --> tests/ui/incorrect/04_field_crate_attr.rs:30:5
   |
30 |     _field: ResourceA,
   |     ^^^^^^

+error: `crate` attribute is not allowed for fields
  --> tests/ui/incorrect/04_field_crate_attr.rs:36:5
   |
36 |     _field: ResourceA,
   |     ^^^^^^

+error: `crate` attribute is not allowed for fields
  --> tests/ui/incorrect/04_field_crate_attr.rs:45:5
   |
45 |     _field_2: TaskA,
   |     ^^^^^^^^
diff --git a/core/node/node_storage_init/Cargo.toml b/core/node/node_storage_init/Cargo.toml
new file mode 100644
index 000000000000..3a1e8b291156
--- /dev/null
+++ b/core/node/node_storage_init/Cargo.toml
@@ -0,0 +1,30 @@
+[package]
+name = "zksync_node_storage_init"
+description = "ZKsync node storage initialization"
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+license.workspace = true
+keywords.workspace = true
+categories.workspace = true
+
+[dependencies]
+zksync_config.workspace = true
+zksync_dal.workspace = true
+zksync_health_check.workspace = true
+zksync_node_sync.workspace = true
+zksync_node_genesis.workspace = true
+zksync_object_store.workspace = true
+zksync_shared_metrics.workspace = true
+zksync_snapshots_applier.workspace = true
+zksync_types.workspace = true
+zksync_web3_decl.workspace = true
+zksync_reorg_detector.workspace = true
+zksync_block_reverter.workspace = true
+
+anyhow.workspace = true
+async-trait.workspace = true
+tokio.workspace = true
+tracing.workspace = true
diff --git a/core/node/node_storage_init/README.md b/core/node/node_storage_init/README.md
new file mode 100644
index 000000000000..e1b6768878ec
--- /dev/null
+++ b/core/node/node_storage_init/README.md
@@ -0,0 +1,5 @@
+# `zksync_node_storage_init`
+
+A set of actions to ensure that any ZKsync node has initialized storage and can start running.
+
+This includes genesis, but is not limited to it, and may involve other steps.
diff --git a/core/node/node_storage_init/src/external_node/genesis.rs b/core/node/node_storage_init/src/external_node/genesis.rs
new file mode 100644
index 000000000000..b7a7efa9cf53
--- /dev/null
+++ b/core/node/node_storage_init/src/external_node/genesis.rs
@@ -0,0 +1,39 @@
+use anyhow::Context as _;
+use tokio::sync::watch;
+use zksync_dal::{ConnectionPool, Core};
+use zksync_types::L2ChainId;
+use zksync_web3_decl::client::{DynClient, L2};
+
+use crate::InitializeStorage;
+
+#[derive(Debug)]
+pub struct ExternalNodeGenesis {
+    pub l2_chain_id: L2ChainId,
+    pub client: Box<DynClient<L2>>,
+    pub pool: ConnectionPool<Core>,
+}
+
+#[async_trait::async_trait]
+impl InitializeStorage for ExternalNodeGenesis {
+    /// Will perform genesis initialization if it's required.
+    /// If genesis is already performed, this method will do nothing.
+    async fn initialize_storage(
+        &self,
+        _stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<()> {
+        let mut storage = self.pool.connection_tagged("en").await?;
+        zksync_node_sync::genesis::perform_genesis_if_needed(
+            &mut storage,
+            self.l2_chain_id,
+            &self.client.clone().for_component("genesis"),
+        )
+        .await
+        .context("performing genesis failed")
+    }
+
+    async fn is_initialized(&self) -> anyhow::Result<bool> {
+        let mut storage = self.pool.connection_tagged("en").await?;
+        let needed = zksync_node_sync::genesis::is_genesis_needed(&mut storage).await?;
+        Ok(!needed)
+    }
+}
diff --git a/core/node/node_storage_init/src/external_node/mod.rs b/core/node/node_storage_init/src/external_node/mod.rs
new file mode 100644
index 000000000000..b04635bf3ccd
--- /dev/null
+++ b/core/node/node_storage_init/src/external_node/mod.rs
@@ -0,0 +1,8 @@
+pub use self::{
+    genesis::ExternalNodeGenesis, revert::ExternalNodeReverter,
+    snapshot_recovery::ExternalNodeSnapshotRecovery,
+};
+
+mod genesis;
+mod revert;
+mod snapshot_recovery;
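The storage-init implementations in this crate follow a check-then-act contract. As a hedged sketch (the `ensure_initialized` helper below is illustrative and not part of this diff), a caller is expected to use them like this:

```rust
// Hypothetical caller-side guard: skip the (potentially expensive)
// initialization if the component reports storage as already initialized.
async fn ensure_initialized(
    init: &dyn InitializeStorage,
    stop_receiver: tokio::sync::watch::Receiver<bool>,
) -> anyhow::Result<()> {
    if !init.is_initialized().await? {
        init.initialize_storage(stop_receiver).await?;
    }
    Ok(())
}
```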
diff --git a/core/node/node_storage_init/src/external_node/revert.rs b/core/node/node_storage_init/src/external_node/revert.rs
new file mode 100644
index 000000000000..86d137c6b660
--- /dev/null
+++ b/core/node/node_storage_init/src/external_node/revert.rs
@@ -0,0 +1,56 @@
+use anyhow::Context as _;
+use tokio::sync::watch;
+use zksync_block_reverter::BlockReverter;
+use zksync_dal::{ConnectionPool, Core};
+use zksync_reorg_detector::ReorgDetector;
+use zksync_types::L1BatchNumber;
+use zksync_web3_decl::client::{DynClient, L2};
+
+use crate::RevertStorage;
+
+#[derive(Debug)]
+pub struct ExternalNodeReverter {
+    pub client: Box<DynClient<L2>>,
+    pub pool: ConnectionPool<Core>,
+    pub reverter: Option<BlockReverter>,
+}
+
+#[async_trait::async_trait]
+impl RevertStorage for ExternalNodeReverter {
+    async fn revert_storage(
+        &self,
+        to_batch: L1BatchNumber,
+        _stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<()> {
+        let Some(block_reverter) = self.reverter.as_ref() else {
+            anyhow::bail!(
+                "Revert to block {to_batch} was requested, but the reverter was not provided."
+            );
+        };
+
+        tracing::info!("Reverting to L1 batch number {to_batch}");
+        block_reverter.roll_back(to_batch).await?;
+        tracing::info!("Revert successfully completed");
+        Ok(())
+    }
+
+    async fn last_correct_batch_for_reorg(
+        &self,
+        stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<Option<L1BatchNumber>> {
+        let mut reorg_detector = ReorgDetector::new(self.client.clone(), self.pool.clone());
+        let batch = match reorg_detector.run_once(stop_receiver).await {
+            Ok(()) => {
+                // Even if a stop signal was received, the node will shut down without launching any tasks.
+                tracing::info!("No rollback was detected");
+                None
+            }
+            Err(zksync_reorg_detector::Error::ReorgDetected(last_correct_l1_batch)) => {
+                tracing::info!("Reverting to L1 batch number {last_correct_l1_batch}");
+                Some(last_correct_l1_batch)
+            }
+            Err(err) => return Err(err).context("reorg_detector.check_consistency()"),
+        };
+        Ok(batch)
+    }
+}
diff --git a/core/node/node_storage_init/src/external_node/snapshot_recovery.rs b/core/node/node_storage_init/src/external_node/snapshot_recovery.rs
new file mode 100644
index 000000000000..d9ba60a1bcbf
--- /dev/null
+++ b/core/node/node_storage_init/src/external_node/snapshot_recovery.rs
@@ -0,0 +1,82 @@
+use std::{sync::Arc, time::Instant};
+
+use anyhow::Context as _;
+use tokio::sync::watch;
+use zksync_dal::{ConnectionPool, Core};
+use zksync_health_check::AppHealthCheck;
+use zksync_object_store::ObjectStoreFactory;
+use zksync_shared_metrics::{SnapshotRecoveryStage, APP_METRICS};
+use zksync_snapshots_applier::{
+    RecoveryCompletionStatus, SnapshotsApplierConfig, SnapshotsApplierTask,
+};
+use zksync_web3_decl::client::{DynClient, L2};
+
+use crate::{InitializeStorage, SnapshotRecoveryConfig};
+
+#[derive(Debug)]
+pub struct ExternalNodeSnapshotRecovery {
+    pub client: Box<DynClient<L2>>,
+    pub pool: ConnectionPool<Core>,
+    pub recovery_config: SnapshotRecoveryConfig,
+    pub app_health: Arc<AppHealthCheck>,
+}
+
+#[async_trait::async_trait]
+impl InitializeStorage for ExternalNodeSnapshotRecovery {
+    async fn initialize_storage(&self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
+        let pool = self.pool.clone();
+        tracing::warn!("Proceeding with snapshot recovery. 
This is an experimental feature; use at your own risk");
+        let object_store_config =
+            self.recovery_config.object_store_config.clone().context(
+                "Snapshot object store must be present if snapshot recovery is activated",
+            )?;
+        let object_store = ObjectStoreFactory::new(object_store_config)
+            .create_store()
+            .await?;
+
+        let config = SnapshotsApplierConfig::default();
+        let mut snapshots_applier_task = SnapshotsApplierTask::new(
+            config,
+            pool,
+            Box::new(self.client.clone().for_component("snapshot_recovery")),
+            object_store,
+        );
+        if let Some(snapshot_l1_batch) = self.recovery_config.snapshot_l1_batch_override {
+            tracing::info!(
+                "Using a specific snapshot with L1 batch #{snapshot_l1_batch}; this may not work \
+                 if the snapshot is too old (order of several weeks old) or non-existent"
+            );
+            snapshots_applier_task.set_snapshot_l1_batch(snapshot_l1_batch);
+        }
+        if self.recovery_config.drop_storage_key_preimages {
+            tracing::info!("Dropping storage key preimages for snapshot storage logs");
+            snapshots_applier_task.drop_storage_key_preimages();
+        }
+        self.app_health
+            .insert_component(snapshots_applier_task.health_check())?;
+
+        let recovery_started_at = Instant::now();
+        let stats = snapshots_applier_task
+            .run(stop_receiver)
+            .await
+            .context("snapshot recovery failed")?;
+        if stats.done_work {
+            let latency = recovery_started_at.elapsed();
+            APP_METRICS.snapshot_recovery_latency[&SnapshotRecoveryStage::Postgres].set(latency);
+            tracing::info!("Recovered Postgres from snapshot in {latency:?}");
+        }
+        // We don't really care if the task was canceled.
+        // If it was, all the other tasks are canceled as well.
+
+        Ok(())
+    }
+
+    async fn is_initialized(&self) -> anyhow::Result<bool> {
+        let mut storage = self.pool.connection_tagged("en").await?;
+        let completed = matches!(
+            SnapshotsApplierTask::is_recovery_completed(&mut storage, &self.client).await?,
+            RecoveryCompletionStatus::Completed
+        );
+        Ok(completed)
+    }
+}
diff --git a/core/node/node_storage_init/src/lib.rs b/core/node/node_storage_init/src/lib.rs
new file mode 100644
index 000000000000..10b0131908ca
--- /dev/null
+++ b/core/node/node_storage_init/src/lib.rs
@@ -0,0 +1,213 @@
+use std::{future::Future, sync::Arc, time::Duration};
+
+use tokio::sync::watch;
+use zksync_config::ObjectStoreConfig;
+use zksync_dal::{ConnectionPool, Core, CoreDal as _};
+use zksync_types::L1BatchNumber;
+
+pub use crate::traits::{InitializeStorage, RevertStorage};
+
+pub mod external_node;
+pub mod main_node;
+mod traits;
+
+#[derive(Debug)]
+pub struct SnapshotRecoveryConfig {
+    /// If not specified, the latest snapshot will be used.
+    pub snapshot_l1_batch_override: Option<L1BatchNumber>,
+    pub drop_storage_key_preimages: bool,
+    pub object_store_config: Option<ObjectStoreConfig>,
+}
+
+#[derive(Debug, Clone, Copy)]
+enum InitDecision {
+    /// Perform or check genesis.
+    Genesis,
+    /// Perform or check snapshot recovery.
+    SnapshotRecovery,
+}
+
+#[derive(Debug, Clone)]
+pub struct NodeInitializationStrategy {
+    pub genesis: Arc<dyn InitializeStorage>,
+    pub snapshot_recovery: Option<Arc<dyn InitializeStorage>>,
+    pub block_reverter: Option<Arc<dyn RevertStorage>>,
+}
+
+/// Node storage initializer.
+/// This structure is responsible for making sure that the node storage is initialized.
+///
+/// This structure operates together with [`NodeRole`] to achieve that:
+/// `NodeStorageInitializer` understands what initialized storage means, but it defers
+/// any actual initialization to the `NodeRole` implementation.
This allows having different
+/// initialization strategies for different node types, while keeping common invariants
+/// for the whole system.
+#[derive(Debug)]
+pub struct NodeStorageInitializer {
+    strategy: NodeInitializationStrategy,
+    pool: ConnectionPool<Core>,
+}
+
+impl NodeStorageInitializer {
+    pub fn new(strategy: NodeInitializationStrategy, pool: ConnectionPool<Core>) -> Self {
+        Self { strategy, pool }
+    }
+
+    /// Returns the preferred kind of storage initialization.
+    /// The decision is based on the current state of the storage.
+    /// Note that the decision does not guarantee that the initialization has not been performed
+    /// already, so any returned decision should be checked before performing the initialization.
+    async fn decision(&self) -> anyhow::Result<InitDecision> {
+        let mut storage = self.pool.connection_tagged("node_init").await?;
+        let genesis_l1_batch = storage
+            .blocks_dal()
+            .get_l1_batch_header(L1BatchNumber(0))
+            .await?;
+        let snapshot_recovery = storage
+            .snapshot_recovery_dal()
+            .get_applied_snapshot_status()
+            .await?;
+        drop(storage);
+
+        let decision = match (genesis_l1_batch, snapshot_recovery) {
+            (Some(batch), Some(snapshot_recovery)) => {
+                anyhow::bail!(
+                    "Node has both genesis L1 batch: {batch:?} and snapshot recovery information: {snapshot_recovery:?}. \
+                    This is not supported and can be caused by broken snapshot recovery."
+                );
+            }
+            (Some(batch), None) => {
+                tracing::info!(
+                    "Node has a genesis L1 batch: {batch:?} and no snapshot recovery info"
+                );
+                InitDecision::Genesis
+            }
+            (None, Some(snapshot_recovery)) => {
+                tracing::info!("Node has no genesis L1 batch and snapshot recovery information: {snapshot_recovery:?}");
+                InitDecision::SnapshotRecovery
+            }
+            (None, None) => {
+                tracing::info!("Node has neither genesis L1 batch, nor snapshot recovery info");
+                if self.strategy.snapshot_recovery.is_some() {
+                    InitDecision::SnapshotRecovery
+                } else {
+                    InitDecision::Genesis
+                }
+            }
+        };
+        Ok(decision)
+    }
+
+    /// Initializes the storage for the node.
+    /// After the initialization, the node can safely start operating.
+    pub async fn run(self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
+        let decision = self.decision().await?;
+
+        // Make sure that we have state to work with.
+        match decision {
+            InitDecision::Genesis => {
+                tracing::info!("Performing genesis initialization");
+                self.strategy
+                    .genesis
+                    .initialize_storage(stop_receiver.clone())
+                    .await?;
+            }
+            InitDecision::SnapshotRecovery => {
+                tracing::info!("Performing snapshot recovery initialization");
+                if let Some(recovery) = &self.strategy.snapshot_recovery {
+                    recovery.initialize_storage(stop_receiver.clone()).await?;
+                } else {
+                    anyhow::bail!(
+                        "Snapshot recovery should be performed, but the strategy is not provided"
+                    );
+                }
+            }
+        }
+
+        // Now we may check whether we're in the invalid state and should perform a rollback.
+        if let Some(reverter) = &self.strategy.block_reverter {
+            if let Some(to_batch) = reverter
+                .last_correct_batch_for_reorg(stop_receiver.clone())
+                .await?
+            {
+                tracing::info!(l1_batch = %to_batch, "State must be rolled back to L1 batch");
+                tracing::info!("Performing the rollback");
+                reverter.revert_storage(to_batch, stop_receiver).await?;
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Checks if the node can safely start operating.
+    pub async fn wait_for_initialized_storage(
+        &self,
+        stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<()> {
+        const POLLING_INTERVAL: Duration = Duration::from_secs(1);
+
+        // Wait until data is added to the database.
+        poll(stop_receiver.clone(), POLLING_INTERVAL, || {
+            self.is_database_initialized()
+        })
+        .await?;
+        if *stop_receiver.borrow() {
+            return Ok(());
+        }
+
+        // Wait until the rollback is no longer needed.
+        poll(stop_receiver.clone(), POLLING_INTERVAL, || {
+            self.is_chain_tip_correct(stop_receiver.clone())
+        })
+        .await?;
+
+        Ok(())
+    }
+
+    async fn is_database_initialized(&self) -> anyhow::Result<bool> {
+        // We're fine if the database is initialized in any meaningful way we can check.
+        if self.strategy.genesis.is_initialized().await? {
+            return Ok(true);
+        }
+        if let Some(snapshot_recovery) = &self.strategy.snapshot_recovery {
+            return snapshot_recovery.is_initialized().await;
+        }
+        Ok(false)
+    }
+
+    /// Checks if the head of the chain has the correct state, i.e. no rollback is needed.
+    async fn is_chain_tip_correct(
+        &self,
+        stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<bool> {
+        // May be `true` if stop signal is received, but the node will shut down without launching any tasks anyway.
+        let initialized = if let Some(reverter) = &self.strategy.block_reverter {
+            reverter
+                .last_correct_batch_for_reorg(stop_receiver)
+                .await?
+                .is_none()
+        } else {
+            true
+        };
+        Ok(initialized)
+    }
+}
+
+async fn poll<F, Fut>(
+    mut stop_receiver: watch::Receiver<bool>,
+    polling_interval: Duration,
+    mut check: F,
+) -> anyhow::Result<()>
+where
+    F: FnMut() -> Fut,
+    Fut: Future<Output = anyhow::Result<bool>>,
+{
+    while !*stop_receiver.borrow() && !check().await? {
+        // Return value will be checked on the next iteration.
+        tokio::time::timeout(polling_interval, stop_receiver.changed())
+            .await
+            .ok();
+    }
+
+    Ok(())
+}
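Putting the pieces together, node startup might drive the initializer roughly as follows. This is a sketch under the assumption that a genesis strategy is available; only `NodeStorageInitializer::new` and `run` come from this diff.

```rust
// Hypothetical composition for a main node that only needs genesis:
async fn init_node_storage(
    genesis: Arc<dyn InitializeStorage>,
    pool: ConnectionPool<Core>,
    stop_receiver: watch::Receiver<bool>,
) -> anyhow::Result<()> {
    let strategy = NodeInitializationStrategy {
        genesis,
        snapshot_recovery: None,
        block_reverter: None,
    };
    NodeStorageInitializer::new(strategy, pool)
        .run(stop_receiver)
        .await
}
```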
diff --git a/core/node/node_storage_init/src/main_node/genesis.rs b/core/node/node_storage_init/src/main_node/genesis.rs
new file mode 100644
index 000000000000..db2eef51912e
--- /dev/null
+++ b/core/node/node_storage_init/src/main_node/genesis.rs
@@ -0,0 +1,54 @@
+use anyhow::Context as _;
+use tokio::sync::watch;
+use zksync_config::{ContractsConfig, GenesisConfig};
+use zksync_dal::{ConnectionPool, Core, CoreDal as _};
+use zksync_node_genesis::GenesisParams;
+use zksync_web3_decl::client::{DynClient, L1};
+
+use crate::traits::InitializeStorage;
+
+#[derive(Debug)]
+pub struct MainNodeGenesis {
+    pub genesis: GenesisConfig,
+    pub contracts: ContractsConfig,
+    pub l1_client: Box<DynClient<L1>>,
+    pub pool: ConnectionPool<Core>,
+}
+
+#[async_trait::async_trait]
+impl InitializeStorage for MainNodeGenesis {
+    /// Will perform genesis initialization if it's required.
+    /// If genesis is already performed, this method will do nothing.
+    async fn initialize_storage(
+        &self,
+        _stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<()> {
+        let mut storage = self.pool.connection_tagged("genesis").await?;
+
+        if !storage.blocks_dal().is_genesis_needed().await? {
+            return Ok(());
+        }
+
+        let params = GenesisParams::load_genesis_params(self.genesis.clone())?;
+        zksync_node_genesis::ensure_genesis_state(&mut storage, &params).await?;
+
+        if let Some(ecosystem_contracts) = &self.contracts.ecosystem_contracts {
+            zksync_node_genesis::save_set_chain_id_tx(
+                &mut storage,
+                &self.l1_client,
+                self.contracts.diamond_proxy_addr,
+                ecosystem_contracts.state_transition_proxy_addr,
+            )
+            .await
+            .context("Failed to save SetChainId upgrade transaction")?;
+        }
+
+        Ok(())
+    }
+
+    async fn is_initialized(&self) -> anyhow::Result<bool> {
+        let mut storage = self.pool.connection_tagged("genesis").await?;
+        let needed = zksync_node_genesis::is_genesis_needed(&mut storage).await?;
+        Ok(!needed)
+    }
+}
diff --git a/core/node/node_storage_init/src/main_node/mod.rs b/core/node/node_storage_init/src/main_node/mod.rs
new file mode 100644
index 000000000000..4254e7b08d87
--- /dev/null
+++ b/core/node/node_storage_init/src/main_node/mod.rs
@@ -0,0 +1,3 @@
+pub use self::genesis::MainNodeGenesis;
+
+mod genesis;
diff --git a/core/node/node_storage_init/src/traits.rs b/core/node/node_storage_init/src/traits.rs
new file mode 100644
index 000000000000..3b6467764d97
--- /dev/null
+++ b/core/node/node_storage_init/src/traits.rs
@@ -0,0 +1,33 @@
+use std::fmt;
+
+use tokio::sync::watch;
+use zksync_types::L1BatchNumber;
+
+/// An abstract storage initialization strategy.
+#[async_trait::async_trait]
+pub trait InitializeStorage: fmt::Debug + Send + Sync + 'static {
+    /// Checks if the storage is already initialized.
+    async fn is_initialized(&self) -> anyhow::Result<bool>;
+
+    /// Initializes the storage.
+    /// Implementors of this method may assume that they have unique access to the storage.
+    async fn initialize_storage(&self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()>;
+}
+
+/// An abstract storage revert strategy.
+/// This trait assumes that for any invalid state there exists a batch number to which the storage can be rolled back.
+#[async_trait::async_trait]
+pub trait RevertStorage: fmt::Debug + Send + Sync + 'static {
+    /// Checks if the storage is in an invalid state and has to be rolled back.
+    async fn last_correct_batch_for_reorg(
+        &self,
+        stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<Option<L1BatchNumber>>;
+
+    /// Reverts the storage to the provided batch number.
+    async fn revert_storage(
+        &self,
+        to_batch: L1BatchNumber,
+        stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<()>;
+}
diff --git a/core/node/node_sync/Cargo.toml b/core/node/node_sync/Cargo.toml
index 58eec35a630c..5f1ae04c5f50 100644
--- a/core/node/node_sync/Cargo.toml
+++ b/core/node/node_sync/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_node_sync"
-version = "0.1.0"
+description = "ZKsync node synchronization utilities"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
@@ -24,7 +25,7 @@ zksync_utils.workspace = true
 zksync_eth_client.workspace = true
 zksync_concurrency.workspace = true
 vise.workspace = true
-vm_utils.workspace = true
+zksync_vm_utils.workspace = true
 
 anyhow.workspace = true
 async-trait.workspace = true
diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs
index 690d38f620a0..50734421341e 100644
--- a/core/node/node_sync/src/external_io.rs
+++ b/core/node/node_sync/src/external_io.rs
@@ -2,7 +2,6 @@ use std::{collections::HashMap, time::Duration};
 
 use anyhow::Context as _;
 use async_trait::async_trait;
-use vm_utils::storage::L1BatchParamsProvider;
 use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode};
 use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_state_keeper::{
@@ -21,6 +20,7 @@ use zksync_types::{
     L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256,
 };
 use zksync_utils::bytes_to_be_words;
+use zksync_vm_utils::storage::L1BatchParamsProvider;
 
 use super::{
     client::MainNodeClient,
@@ -43,18 +43,13 @@ pub struct ExternalIO {
 }
 
 impl ExternalIO {
-    pub async fn new(
+    pub fn new(
         pool: ConnectionPool<Core>,
         actions: ActionQueue,
         main_node_client: Box<dyn MainNodeClient>,
         chain_id: L2ChainId,
     ) -> anyhow::Result<Self> {
-        let mut storage = pool.connection_tagged("sync_layer").await?;
-        let l1_batch_params_provider = L1BatchParamsProvider::new(&mut storage)
-            .await
-            .context("failed initializing L1 batch params provider")?;
-        drop(storage);
-
+        let l1_batch_params_provider = L1BatchParamsProvider::new();
         Ok(Self {
             pool,
             l1_batch_params_provider,
@@ -137,6 +132,10 @@ impl StateKeeperIO for ExternalIO {
     async fn initialize(&mut self) -> anyhow::Result<(IoCursor, Option<PendingBatchData>)> {
         let mut storage = self.pool.connection_tagged("sync_layer").await?;
         let cursor = IoCursor::new(&mut storage).await?;
+        self.l1_batch_params_provider
+            .initialize(&mut storage)
+            .await
+            .context("failed initializing L1 batch params provider")?;
         tracing::info!(
             "Initialized the ExternalIO: current L1 batch number {}, current L2 block number {}",
             cursor.l1_batch,
diff --git a/core/node/node_sync/src/genesis.rs b/core/node/node_sync/src/genesis.rs
index c1b45f8ade93..ccc26b417e98 100644
--- a/core/node/node_sync/src/genesis.rs
+++ b/core/node/node_sync/src/genesis.rs
@@ -8,6 +8,10 @@ use zksync_types::{
 
 use super::client::MainNodeClient;
 
+pub async fn is_genesis_needed(storage: &mut Connection<'_, Core>) -> anyhow::Result<bool> {
+    Ok(storage.blocks_dal().is_genesis_needed().await?)
+}
+
 pub async fn perform_genesis_if_needed(
     storage: &mut Connection<'_, Core>,
     zksync_chain_id: L2ChainId,
diff --git a/core/node/node_sync/src/sync_action.rs b/core/node/node_sync/src/sync_action.rs
index 09d49943a454..8cb90d24fe84 100644
--- a/core/node/node_sync/src/sync_action.rs
+++ b/core/node/node_sync/src/sync_action.rs
@@ -13,20 +13,30 @@ impl ActionQueueSender {
     /// Requires that the actions are in the correct order: starts with a new open L1 batch / L2 block,
     /// followed by 0 or more transactions, have mandatory `SealL2Block` and optional `SealBatch` at the end.
     /// Would panic if the order is incorrect.
-    pub async fn push_actions(&self, actions: Vec<SyncAction>) {
-        Self::check_action_sequence(&actions).unwrap();
+    ///
+    /// # Errors
+    ///
+    /// Errors correspond to incorrect action order, or to the `ExternalIO` instance that the queue is connected to
+    /// shutting down. Hence, returned errors must be treated as unrecoverable by the caller; it is unsound to continue
+    /// operating a node if some of the `actions` may be lost.
+    pub async fn push_actions(&self, actions: Vec<SyncAction>) -> anyhow::Result<()> {
+        Self::check_action_sequence(&actions)?;
         for action in actions {
-            self.0.send(action).await.expect("EN sync logic panicked");
+            self.0
+                .send(action)
+                .await
+                .map_err(|_| anyhow::anyhow!("node action processor stopped"))?;
             QUEUE_METRICS
                 .action_queue_size
                 .set(self.0.max_capacity() - self.0.capacity());
         }
+        Ok(())
     }
 
     /// Checks whether the action sequence is valid.
     /// Returned error is meant to be used as a panic message, since an invalid sequence represents an unrecoverable
     /// error. This function itself does not panic for the ease of testing.
-    fn check_action_sequence(actions: &[SyncAction]) -> Result<(), String> {
+    fn check_action_sequence(actions: &[SyncAction]) -> anyhow::Result<()> {
         // Rules for the sequence:
         // 1. Must start with either `OpenBatch` or `L2Block`, both of which may be met only once.
         // 2. Followed by a sequence of `Tx` actions which consists of 0 or more elements.
@@ -38,27 +48,22 @@ impl ActionQueueSender {
         for action in actions {
             match action {
                 SyncAction::OpenBatch { .. } | SyncAction::L2Block { .. } => {
-                    if opened {
-                        return Err(format!("Unexpected OpenBatch / L2Block: {actions:?}"));
-                    }
+                    anyhow::ensure!(!opened, "Unexpected OpenBatch / L2Block: {actions:?}");
                     opened = true;
                 }
                 SyncAction::Tx(_) => {
-                    if !opened || l2_block_sealed {
-                        return Err(format!("Unexpected Tx: {actions:?}"));
-                    }
+                    anyhow::ensure!(opened && !l2_block_sealed, "Unexpected Tx: {actions:?}");
                 }
                 SyncAction::SealL2Block | SyncAction::SealBatch => {
-                    if !opened || l2_block_sealed {
-                        return Err(format!("Unexpected SealL2Block / SealBatch: {actions:?}"));
-                    }
+                    anyhow::ensure!(
                        opened && !l2_block_sealed,
                        "Unexpected SealL2Block / SealBatch: {actions:?}"
                    );
                     l2_block_sealed = true;
                 }
             }
         }
-        if !l2_block_sealed {
-            return Err(format!("Incomplete sequence: {actions:?}"));
-        }
+        anyhow::ensure!(l2_block_sealed, "Incomplete sequence: {actions:?}");
         Ok(())
     }
 }
@@ -287,7 +292,7 @@ mod tests {
             panic!("Invalid sequence passed the test. Sequence #{idx}, expected error: {expected_err}");
         };
         assert!(
-            err.starts_with(expected_err),
+            err.to_string().contains(expected_err),
            "Sequence #{idx} failed. 
Expected error: {expected_err}, got: {err}" ); } diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index 9830641a9fa1..510f9124c297 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -118,7 +118,6 @@ impl StateKeeperHandles { Box::new(main_node_client), L2ChainId::default(), ) - .await .unwrap(); let (stop_sender, stop_receiver) = watch::channel(false); @@ -230,7 +229,7 @@ async fn external_io_basics(snapshot_recovery: bool) { &[&extract_tx_hashes(&actions)], ) .await; - actions_sender.push_actions(actions).await; + actions_sender.push_actions(actions).await.unwrap(); // Wait until the L2 block is sealed. state_keeper .wait_for_local_block(snapshot.l2_block_number + 1) @@ -316,7 +315,7 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo &[&extract_tx_hashes(&actions)], ) .await; - actions_sender.push_actions(actions).await; + actions_sender.push_actions(actions).await.unwrap(); // Wait until the L2 block is sealed. state_keeper .wait_for_local_block(snapshot.l2_block_number + 1) @@ -407,8 +406,14 @@ pub(super) async fn run_state_keeper_with_multiple_l2_blocks( let (actions_sender, action_queue) = ActionQueue::new(); let client = MockMainNodeClient::default(); let state_keeper = StateKeeperHandles::new(pool, client, action_queue, &[&tx_hashes]).await; - actions_sender.push_actions(first_l2_block_actions).await; - actions_sender.push_actions(second_l2_block_actions).await; + actions_sender + .push_actions(first_l2_block_actions) + .await + .unwrap(); + actions_sender + .push_actions(second_l2_block_actions) + .await + .unwrap(); // Wait until both L2 blocks are sealed. state_keeper .wait_for_local_block(snapshot.l2_block_number + 2) @@ -490,7 +495,7 @@ async fn test_external_io_recovery( number: snapshot.l2_block_number + 3, }; let actions = vec![open_l2_block, new_tx.into(), SyncAction::SealL2Block]; - actions_sender.push_actions(actions).await; + actions_sender.push_actions(actions).await.unwrap(); state_keeper .wait_for_local_block(snapshot.l2_block_number + 3) .await; @@ -580,9 +585,18 @@ pub(super) async fn run_state_keeper_with_multiple_l1_batches( &[&[first_tx_hash], &[second_tx_hash]], ) .await; - actions_sender.push_actions(first_l1_batch_actions).await; - actions_sender.push_actions(fictive_l2_block_actions).await; - actions_sender.push_actions(second_l1_batch_actions).await; + actions_sender + .push_actions(first_l1_batch_actions) + .await + .unwrap(); + actions_sender + .push_actions(fictive_l2_block_actions) + .await + .unwrap(); + actions_sender + .push_actions(second_l1_batch_actions) + .await + .unwrap(); let hash_task = tokio::spawn(mock_l1_batch_hash_computation( pool.clone(), diff --git a/core/node/node_sync/src/tree_data_fetcher/tests.rs b/core/node/node_sync/src/tree_data_fetcher/tests.rs index 5d94ddf658d6..5cb8b9241b24 100644 --- a/core/node/node_sync/src/tree_data_fetcher/tests.rs +++ b/core/node/node_sync/src/tree_data_fetcher/tests.rs @@ -85,7 +85,8 @@ pub(super) async fn seal_l1_batch_with_timestamp( let initial_writes = [StorageKey::new( AccountTreeId::new(Address::repeat_byte(1)), H256::from_low_u64_be(number.0.into()), - )]; + ) + .hashed_key()]; transaction .storage_logs_dedup_dal() .insert_initial_writes(number, &initial_writes) diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml index 301ce0df6a80..31a0e8437ba5 100644 --- a/core/node/proof_data_handler/Cargo.toml +++ b/core/node/proof_data_handler/Cargo.toml @@ -1,6 
+1,7 @@
 [package]
 name = "zksync_proof_data_handler"
-version = "0.1.0"
+description = "ZKsync proof data handler API"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
@@ -10,6 +11,7 @@ keywords.workspace = true
 categories.workspace = true
 
 [dependencies]
+vise.workspace = true
 zksync_config.workspace = true
 zksync_dal.workspace = true
 zksync_object_store.workspace = true
@@ -24,7 +26,7 @@ tracing.workspace = true
 [dev-dependencies]
 hyper.workspace = true
 chrono.workspace = true
-multivm.workspace = true
+zksync_multivm.workspace = true
 serde_json.workspace = true
 tower.workspace = true
 zksync_basic_types.workspace = true
diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs
index 5a3cb2d95b6a..618a786ea658 100644
--- a/core/node/proof_data_handler/src/lib.rs
+++ b/core/node/proof_data_handler/src/lib.rs
@@ -18,6 +18,7 @@ use zksync_types::commitment::L1BatchCommitmentMode;
 mod tests;
 
 mod errors;
+mod metrics;
 mod request_processor;
 mod tee_request_processor;
 
@@ -32,8 +33,10 @@ pub async fn run_server(
     tracing::debug!("Starting proof data handler server on {bind_address}");
     let app = create_proof_processing_router(blob_store, connection_pool, config, commitment_mode);
 
-    axum::Server::bind(&bind_address)
-        .serve(app.into_make_service())
+    let listener = tokio::net::TcpListener::bind(bind_address)
+        .await
+        .with_context(|| format!("Failed binding proof data handler server to {bind_address}"))?;
+    axum::serve(listener, app)
         .with_graceful_shutdown(async move {
             if stop_receiver.changed().await.is_err() {
                 tracing::warn!("Stop signal sender for proof data handler server was dropped without sending a signal");
diff --git a/core/node/proof_data_handler/src/metrics.rs b/core/node/proof_data_handler/src/metrics.rs
new file mode 100644
index 000000000000..edccda90dc24
--- /dev/null
+++ b/core/node/proof_data_handler/src/metrics.rs
@@ -0,0 +1,41 @@
+use vise::{Histogram, Metrics};
+use zksync_object_store::bincode;
+use zksync_prover_interface::inputs::WitnessInputData;
+
+const BYTES_IN_MEGABYTE: u64 = 1024 * 1024;
+
+#[derive(Debug, Metrics)]
+pub(super) struct ProofDataHandlerMetrics {
+    #[metrics(buckets = vise::Buckets::exponential(1.0..=2_048.0, 2.0))]
+    pub vm_run_data_blob_size_in_mb: Histogram<u64>,
+    #[metrics(buckets = vise::Buckets::exponential(1.0..=2_048.0, 2.0))]
+    pub merkle_paths_blob_size_in_mb: Histogram<u64>,
+    #[metrics(buckets = vise::Buckets::exponential(1.0..=2_048.0, 2.0))]
+    pub eip_4844_blob_size_in_mb: Histogram<u64>,
+    #[metrics(buckets = vise::Buckets::exponential(1.0..=2_048.0, 2.0))]
+    pub total_blob_size_in_mb: Histogram<u64>,
+}
+
+impl ProofDataHandlerMetrics {
+    pub fn observe_blob_sizes(&self, blob: &WitnessInputData) {
+        let vm_run_data_blob_size_in_mb =
+            bincode::serialize(&blob.vm_run_data).unwrap().len() as u64 / BYTES_IN_MEGABYTE;
+        let merkle_paths_blob_size_in_mb =
+            bincode::serialize(&blob.merkle_paths).unwrap().len() as u64 / BYTES_IN_MEGABYTE;
+        let eip_4844_blob_size_in_mb =
+            bincode::serialize(&blob.eip_4844_blobs).unwrap().len() as u64 / BYTES_IN_MEGABYTE;
+        let total_blob_size_in_mb =
+            bincode::serialize(blob).unwrap().len() as u64 / BYTES_IN_MEGABYTE;
+
+        self.vm_run_data_blob_size_in_mb
+            .observe(vm_run_data_blob_size_in_mb);
+        self.merkle_paths_blob_size_in_mb
+            .observe(merkle_paths_blob_size_in_mb);
+        self.eip_4844_blob_size_in_mb
+            .observe(eip_4844_blob_size_in_mb);
+        self.total_blob_size_in_mb.observe(total_blob_size_in_mb);
+    }
+}
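For reference, `vise::Buckets::exponential(1.0..=2_048.0, 2.0)` should yield histogram boundaries at powers of two from 1 up to 2048 (in MB, given the division above). A quick sanity check of that arithmetic in plain Rust, independent of vise:

```rust
// The 12 exponential bucket boundaries: 2^0 = 1 MB up to 2^11 = 2048 MB.
let buckets: Vec<f64> = (0..12).map(|i| 2f64.powi(i)).collect();
assert_eq!(buckets.first(), Some(&1.0));
assert_eq!(buckets.last(), Some(&2048.0));
```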
+
+#[vise::register]
+pub(super) static METRICS: vise::Global<ProofDataHandlerMetrics> = vise::Global::new();
diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs
index 170b27bb971f..a89f9b63a848 100644
--- a/core/node/proof_data_handler/src/request_processor.rs
+++ b/core/node/proof_data_handler/src/request_processor.rs
@@ -4,9 +4,14 @@ use axum::{extract::Path, Json};
 use zksync_config::configs::ProofDataHandlerConfig;
 use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_object_store::ObjectStore;
-use zksync_prover_interface::api::{
-    ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse,
-    SubmitProofRequest, SubmitProofResponse,
+use zksync_prover_interface::{
+    api::{
+        ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse,
+        SubmitProofRequest, SubmitProofResponse,
+    },
+    inputs::{
+        L1BatchMetadataHashes, VMRunWitnessInputData, WitnessInputData, WitnessInputMerklePaths,
+    },
 };
 use zksync_types::{
     basic_fri_types::Eip4844Blobs,
@@ -15,7 +20,7 @@ use zksync_types::{
     L1BatchNumber, H256,
 };
 
-use crate::errors::RequestProcessorError;
+use crate::{errors::RequestProcessorError, metrics::METRICS};
 
 #[derive(Clone)]
 pub(crate) struct RequestProcessor {
@@ -61,11 +66,27 @@ impl RequestProcessor {
             None => return Ok(Json(ProofGenerationDataResponse::Success(None))), // no batches pending to be proven
         };
 
-        let blob = self
+        let vm_run_data: VMRunWitnessInputData = self
             .blob_store
             .get(l1_batch_number)
             .await
             .map_err(RequestProcessorError::ObjectStore)?;
+        let merkle_paths: WitnessInputMerklePaths = self
+            .blob_store
+            .get(l1_batch_number)
+            .await
+            .map_err(RequestProcessorError::ObjectStore)?;
+
+        let previous_batch_metadata = self
+            .pool
+            .connection()
+            .await
+            .unwrap()
+            .blocks_dal()
+            .get_l1_batch_metadata(L1BatchNumber(l1_batch_number.checked_sub(1).unwrap()))
+            .await
+            .unwrap()
+            .expect("No metadata for previous batch");
 
         let header = self
             .pool
@@ -115,13 +136,26 @@ impl RequestProcessor {
             }
         };
 
+        let blob = WitnessInputData {
+            vm_run_data,
+            merkle_paths,
+            eip_4844_blobs,
+            previous_batch_metadata: L1BatchMetadataHashes {
+                root_hash: previous_batch_metadata.metadata.root_hash,
+                meta_hash: previous_batch_metadata.metadata.meta_parameters_hash,
+                aux_hash: previous_batch_metadata.metadata.aux_data_hash,
+            },
+        };
+
+        METRICS.observe_blob_sizes(&blob);
+
         let proof_gen_data = ProofGenerationData {
             l1_batch_number,
-            data: blob,
+            witness_input_data: blob,
             protocol_version: protocol_version.version,
             l1_verifier_config: protocol_version.l1_verifier_config,
-            eip_4844_blobs,
         };
+
         Ok(Json(ProofGenerationDataResponse::Success(Some(Box::new(
             proof_gen_data,
         )))))
diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs
index 957d0ef085f1..243c9e06cfcc 100644
--- a/core/node/proof_data_handler/src/tee_request_processor.rs
+++ b/core/node/proof_data_handler/src/tee_request_processor.rs
@@ -2,20 +2,19 @@ use std::sync::Arc;
 
 use axum::{extract::Path, Json};
 use zksync_config::configs::ProofDataHandlerConfig;
-use zksync_dal::{tee_proof_generation_dal::TeeType, ConnectionPool, Core, CoreDal};
+use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_object_store::ObjectStore;
-use zksync_prover_interface::api::{
-    GenericProofGenerationDataResponse, RegisterTeeAttestationRequest,
-    RegisterTeeAttestationResponse, SubmitProofResponse, SubmitTeeProofRequest,
-    TeeProofGenerationDataRequest,
+use zksync_prover_interface::{
+    api::{
+        RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse,
+        SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse,
+    },
+    inputs::TeeVerifierInput,
 };
-use zksync_tee_verifier::TeeVerifierInput;
 use zksync_types::L1BatchNumber;
 
 use crate::errors::RequestProcessorError;
 
-pub type TeeProofGenerationDataResponse = GenericProofGenerationDataResponse<TeeVerifierInput>;
-
 #[derive(Clone)]
 pub(crate) struct TeeRequestProcessor {
     blob_store: Arc<dyn ObjectStore>,
@@ -55,7 +54,7 @@ impl TeeRequestProcessor {
             .map_err(RequestProcessorError::Dal)?;
         let l1_batch_number = match l1_batch_number_result {
             Some(number) => number,
-            None => return Ok(Json(TeeProofGenerationDataResponse::Success(None))),
+            None => return Ok(Json(TeeProofGenerationDataResponse(None))),
         };
 
         let tee_verifier_input: TeeVerifierInput = self
@@ -64,9 +63,9 @@
             .await
             .map_err(RequestProcessorError::ObjectStore)?;
 
-        Ok(Json(TeeProofGenerationDataResponse::Success(Some(
-            Box::new(tee_verifier_input),
-        ))))
+        Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(
+            tee_verifier_input,
+        )))))
     }
 
     pub(crate) async fn submit_proof(
@@ -92,7 +91,7 @@
                 &proof.0.signature,
                 &proof.0.pubkey,
                 &proof.0.proof,
-                TeeType::Sgx,
+                proof.0.tee_type,
             )
             .await
             .map_err(RequestProcessorError::Dal)?;
diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs
index 7047bd154c9a..1fbe563d2d28 100644
--- a/core/node/proof_data_handler/src/tests.rs
+++ b/core/node/proof_data_handler/src/tests.rs
@@ -6,17 +6,18 @@ use axum::{
     response::Response,
     Router,
 };
-use hyper::body::HttpBody;
-use multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode};
 use serde_json::json;
 use tower::ServiceExt;
 use zksync_basic_types::U256;
 use zksync_config::configs::ProofDataHandlerConfig;
 use zksync_contracts::{BaseSystemContracts, SystemContractCode};
 use zksync_dal::{ConnectionPool, CoreDal};
+use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode};
 use zksync_object_store::MockObjectStore;
-use zksync_prover_interface::{api::SubmitTeeProofRequest, inputs::PrepareBasicCircuitsJob};
-use zksync_tee_verifier::TeeVerifierInput;
+use zksync_prover_interface::{
+    api::SubmitTeeProofRequest,
+    inputs::{TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths},
+};
 use zksync_types::{commitment::L1BatchCommitmentMode, L1BatchNumber, H256};
 
 use crate::create_proof_processing_router;
@@ -32,8 +33,8 @@ async fn request_tee_proof_inputs() {
     // prepare a sample mocked TEE verifier input
 
     let batch_number = L1BatchNumber::from(1);
-    let tvi = TeeVerifierInput::new(
-        PrepareBasicCircuitsJob::new(0),
+    let tvi = V1TeeVerifierInput::new(
+        WitnessInputMerklePaths::new(0),
         vec![],
         L1BatchEnv {
             previous_batch_hash: Some(H256([1; 32])),
@@ -69,6 +70,7 @@
         },
         vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])],
     );
+    let tvi = TeeVerifierInput::V1(tvi);
 
     // populate mocked object store with a single batch blob
 
@@ -107,12 +109,10 @@
 
     assert_eq!(response.status(), StatusCode::OK);
 
-    let body = response.into_body().collect().await.unwrap().to_bytes();
+    let body = axum::body::to_bytes(response.into_body(), usize::MAX)
+        .await
+        .unwrap();
     let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
-    let json = json
-        .get("Success")
-        .expect("Unexpected response format")
-        .clone();
     let deserialized: TeeVerifierInput = serde_json::from_value(json).unwrap();
     assert_eq!(tvi, deserialized);
 
@@ -133,7 +133,8 @@ async fn submit_tee_proof() {
     let tee_proof_request_str = r#"{
         "signature": [ 0, 1, 2, 3, 4 ],
         "pubkey": [ 5, 6, 7, 8, 9 ],
-        "proof": [ 10, 11, 12, 13, 14 ]
+        "proof": [ 10, 11, 12, 13, 14 ],
+        "tee_type": "Sgx"
     }"#;
     let tee_proof_request =
         serde_json::from_str::<SubmitTeeProofRequest>(tee_proof_request_str).unwrap();
diff --git a/core/node/reorg_detector/Cargo.toml b/core/node/reorg_detector/Cargo.toml
index 75e2eb3c0ece..e3e4834e90bb 100644
--- a/core/node/reorg_detector/Cargo.toml
+++ b/core/node/reorg_detector/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_reorg_detector"
-version = "0.1.0"
+description = "ZKsync reorg detector"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
diff --git a/core/node/shared_metrics/Cargo.toml b/core/node/shared_metrics/Cargo.toml
index 5fbbf16a2ec7..f30a2ba35334 100644
--- a/core/node/shared_metrics/Cargo.toml
+++ b/core/node/shared_metrics/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_shared_metrics"
-version = "0.1.0"
+description = "ZKsync shared metrics"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
diff --git a/core/node/shared_metrics/src/lib.rs b/core/node/shared_metrics/src/lib.rs
index 22a90349191d..e0a7fa74ef42 100644
--- a/core/node/shared_metrics/src/lib.rs
+++ b/core/node/shared_metrics/src/lib.rs
@@ -31,6 +31,7 @@ pub enum InitStage {
     Tree,
     TeeVerifierInputProducer,
     Consensus,
+    DADispatcher,
 }
 
 impl fmt::Display for InitStage {
@@ -46,6 +47,7 @@
             Self::Tree => formatter.write_str("tree"),
             Self::TeeVerifierInputProducer => formatter.write_str("tee_verifier_input_producer"),
             Self::Consensus => formatter.write_str("consensus"),
+            Self::DADispatcher => formatter.write_str("da_dispatcher"),
         }
     }
 }
diff --git a/core/node/state_keeper/Cargo.toml b/core/node/state_keeper/Cargo.toml
index c2ac940eef39..904d17718503 100644
--- a/core/node/state_keeper/Cargo.toml
+++ b/core/node/state_keeper/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "zksync_state_keeper"
-version = "0.1.0"
+description = "ZKsync state keeper"
+version.workspace = true
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true
@@ -12,7 +13,7 @@ categories.workspace = true
 
 [dependencies]
 vise.workspace = true
-multivm.workspace = true
+zksync_multivm.workspace = true
 zksync_types.workspace = true
 zksync_dal.workspace = true
 zksync_state.workspace = true
@@ -27,7 +28,8 @@ zksync_protobuf.workspace = true
 zksync_test_account.workspace = true
 zksync_node_genesis.workspace = true
 zksync_node_test_utils.workspace = true
-vm_utils.workspace = true
+zksync_vm_utils.workspace = true
+zksync_base_token_adjuster.workspace = true
 
 anyhow.workspace = true
 async-trait.workspace = true
diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/node/state_keeper/src/batch_executor/main_executor.rs
index 60695c4e0ff0..1d780924ea19 100644
--- a/core/node/state_keeper/src/batch_executor/main_executor.rs
+++ b/core/node/state_keeper/src/batch_executor/main_executor.rs
@@ -2,7 +2,12 @@ use std::sync::Arc;
 
 use anyhow::Context as _;
 use async_trait::async_trait;
-use multivm::{
+use once_cell::sync::OnceCell;
+use tokio::{
+    runtime::Handle,
+    sync::{mpsc, watch},
+};
+use zksync_multivm::{
     interface::{
         ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv,
         VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled,
@@ -11,11 +16,6 @@ use multivm::{
     vm_latest::HistoryEnabled,
     MultiVMTracer, VmInstance,
 };
-use once_cell::sync::OnceCell;
-use tokio::{
-    runtime::Handle,
-    sync::{mpsc, watch},
-};
 use zksync_shared_metrics::{InteractionType, TxStage, APP_METRICS};
 use zksync_state::{ReadStorage, ReadStorageFactory, StorageView};
 use zksync_types::{vm_trace::Call, Transaction};
@@ -147,6 +147,15 @@ impl CommandReceiver {
                         .observe(metrics.time_spent_on_set_value);
                     return;
                 }
+                Command::FinishBatchWithCache(resp) => {
+                    let vm_block_result = self.finish_batch(&mut vm);
+                    let cache = (*storage_view).borrow().cache();
+                    if resp.send((vm_block_result, cache)).is_err() {
+                        break;
+                    }
+
+                    return;
+                }
             }
         }
         // State keeper can exit because of stop signal, so it's OK to exit mid-batch.
diff --git a/core/node/state_keeper/src/batch_executor/mod.rs b/core/node/state_keeper/src/batch_executor/mod.rs
index 8703831f3952..4577ab1b360a 100644
--- a/core/node/state_keeper/src/batch_executor/mod.rs
+++ b/core/node/state_keeper/src/batch_executor/mod.rs
@@ -2,14 +2,14 @@ use std::{error::Error as StdError, fmt, sync::Arc};
 
 use anyhow::Context as _;
 use async_trait::async_trait;
-use multivm::interface::{
-    FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionResultAndLogs,
-};
 use tokio::{
     sync::{mpsc, oneshot, watch},
     task::JoinHandle,
 };
-use zksync_state::ReadStorageFactory;
+use zksync_multivm::interface::{
+    FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionResultAndLogs,
+};
+use zksync_state::{ReadStorageFactory, StorageViewCache};
 use zksync_types::{vm_trace::Call, Transaction};
 use zksync_utils::bytecode::CompressedBytecodeInfo;
 
@@ -229,6 +229,33 @@ impl BatchExecutorHandle {
         latency.observe();
         Ok(finished_batch)
     }
+
+    pub async fn finish_batch_with_cache(
+        mut self,
+    ) -> anyhow::Result<(FinishedL1Batch, StorageViewCache)> {
+        let (response_sender, response_receiver) = oneshot::channel();
+        let send_failed = self
+            .commands
+            .send(Command::FinishBatchWithCache(response_sender))
+            .await
+            .is_err();
+        if send_failed {
+            return Err(self.handle.wait_for_error().await);
+        }
+
+        let latency = EXECUTOR_METRICS.batch_executor_command_response_time
+            [&ExecutorCommand::FinishBatchWithCache]
+            .start();
+        let batch_with_cache = match response_receiver.await {
+            Ok(batch_with_cache) => batch_with_cache,
+            Err(_) => return Err(self.handle.wait_for_error().await),
+        };
+
+        self.handle.wait().await?;
+
+        latency.observe();
+        Ok(batch_with_cache)
+    }
 }
 
 #[derive(Debug)]
@@ -237,4 +264,5 @@ pub(super) enum Command {
     StartNextL2Block(L2BlockEnv, oneshot::Sender<()>),
     RollbackLastTx(oneshot::Sender<()>),
     FinishBatch(oneshot::Sender<FinishedL1Batch>),
+    FinishBatchWithCache(oneshot::Sender<(FinishedL1Batch, StorageViewCache)>),
 }
diff --git a/core/node/state_keeper/src/batch_executor/tests/mod.rs b/core/node/state_keeper/src/batch_executor/tests/mod.rs
index c2196a7b6b28..4b36965895fd 100644
--- a/core/node/state_keeper/src/batch_executor/tests/mod.rs
+++ b/core/node/state_keeper/src/batch_executor/tests/mod.rs
@@ -69,12 +69,12 @@ impl SnapshotRecoveryMutation {
     fn mutate_snapshot(self, storage_snapshot: &mut StorageSnapshot, alice: &Account) {
         match self {
             Self::RemoveNonce => {
-                let nonce_key = get_nonce_key(&alice.address());
+                let nonce_key = get_nonce_key(&alice.address()).hashed_key();
                 let nonce_value = storage_snapshot.storage_logs.remove(&nonce_key);
                 assert!(nonce_value.is_some());
             }
             Self::RemoveBalance => {
-                let balance_key = storage_key_for_eth_balance(&alice.address());
+                let balance_key = 
storage_key_for_eth_balance(&alice.address()).hashed_key();
                 let balance_value = storage_snapshot.storage_logs.remove(&balance_key);
                 assert!(balance_value.is_some());
             }
@@ -82,8 +82,8 @@
         }
     }
 
-const EXECUTE_L2_TX_AFTER_SNAPSHOT_RECOVERY_CASES: test_casing::Product<(
-    [std::option::Option<SnapshotRecoveryMutation>; 3],
+const EXECUTE_L2_TX_AFTER_SNAPSHOT_RECOVERY_CASES: Product<(
+    [Option<SnapshotRecoveryMutation>; 3],
     [StorageType; 3],
 )> = Product((SnapshotRecoveryMutation::ALL, StorageType::ALL));
diff --git a/core/node/state_keeper/src/batch_executor/tests/tester.rs b/core/node/state_keeper/src/batch_executor/tests/tester.rs
index 7e734ffc3d5e..579f3bee4819 100644
--- a/core/node/state_keeper/src/batch_executor/tests/tester.rs
+++ b/core/node/state_keeper/src/batch_executor/tests/tester.rs
@@ -3,25 +3,29 @@
 
 use std::{collections::HashMap, fmt::Debug, sync::Arc};
 
-use multivm::{
-    interface::{L1BatchEnv, L2BlockEnv, SystemEnv},
-    vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES,
-};
 use tempfile::TempDir;
 use tokio::{sync::watch, task::JoinHandle};
 use zksync_config::configs::chain::StateKeeperConfig;
 use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams};
 use zksync_dal::{ConnectionPool, Core, CoreDal};
-use zksync_node_genesis::create_genesis_l1_batch;
-use zksync_node_test_utils::prepare_recovery_snapshot;
+use zksync_multivm::{
+    interface::{L1BatchEnv, L2BlockEnv, SystemEnv},
+    vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES,
+};
+use zksync_node_genesis::{create_genesis_l1_batch, GenesisParams};
+use zksync_node_test_utils::{recover, Snapshot};
 use zksync_state::{ReadStorageFactory, RocksdbStorageOptions};
 use zksync_test_account::{Account, DeployContractsTx, TxType};
 use zksync_types::{
-    block::L2BlockHasher, ethabi::Token, protocol_version::ProtocolSemanticVersion,
-    snapshots::SnapshotRecoveryStatus, storage_writes_deduplicator::StorageWritesDeduplicator,
-    system_contracts::get_system_smart_contracts, utils::storage_key_for_standard_token_balance,
+    block::L2BlockHasher,
+    ethabi::Token,
+    protocol_version::ProtocolSemanticVersion,
+    snapshots::{SnapshotRecoveryStatus, SnapshotStorageLog},
+    storage_writes_deduplicator::StorageWritesDeduplicator,
+    system_contracts::get_system_smart_contracts,
+    utils::storage_key_for_standard_token_balance,
     AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId,
-    StorageKey, StorageLog, Transaction, H256, L2_BASE_TOKEN_ADDRESS, U256,
+    StorageLog, Transaction, H256, L2_BASE_TOKEN_ADDRESS, U256,
 };
 use zksync_utils::u256_to_h256;
 
@@ -284,7 +288,7 @@ impl Tester {
             {
                 storage
                     .storage_logs_dedup_dal()
-                    .insert_initial_writes(L1BatchNumber(0), &[storage_log.key])
+                    .insert_initial_writes(L1BatchNumber(0), &[storage_log.key.hashed_key()])
                     .await
                     .unwrap();
             }
@@ -433,7 +437,7 @@
 pub(super) struct StorageSnapshot {
     pub l2_block_number: L2BlockNumber,
     pub l2_block_hash: H256,
     pub l2_block_timestamp: u64,
-    pub storage_logs: HashMap<StorageKey, H256>,
+    pub storage_logs: HashMap<H256, H256>,
     pub factory_deps: HashMap<H256, Vec<u8>>,
 }
 
@@ -512,7 +516,7 @@ impl StorageSnapshot {
         all_logs.extend(
             modified_entries
                 .into_iter()
-                .map(|(key, slot)| (key, slot.value)),
+                .map(|(key, slot)| (key.hashed_key(), slot.value)),
         );
 
         // Compute the hash of the last (fictive) L2 block in the batch. 
@@ -539,17 +543,23 @@ impl StorageSnapshot { let snapshot_logs: Vec<_> = self .storage_logs .into_iter() - .map(|(key, value)| StorageLog::new_write_log(key, value)) + .enumerate() + .map(|(i, (key, value))| SnapshotStorageLog { + key, + value, + l1_batch_number_of_initial_write: L1BatchNumber(1), + enumeration_index: i as u64 + 1, + }) .collect(); let mut storage = connection_pool.connection().await.unwrap(); - let mut snapshot = prepare_recovery_snapshot( - &mut storage, + + let snapshot = Snapshot::new( L1BatchNumber(1), self.l2_block_number, - &snapshot_logs, - ) - .await; - + snapshot_logs, + GenesisParams::mock(), + ); + let mut snapshot = recover(&mut storage, snapshot).await; snapshot.l2_block_hash = self.l2_block_hash; snapshot.l2_block_timestamp = self.l2_block_timestamp; diff --git a/core/node/state_keeper/src/io/common/mod.rs b/core/node/state_keeper/src/io/common/mod.rs index f521a87ab228..6bd881414a20 100644 --- a/core/node/state_keeper/src/io/common/mod.rs +++ b/core/node/state_keeper/src/io/common/mod.rs @@ -1,8 +1,8 @@ use std::time::Duration; use anyhow::Context; -use multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_dal::{Connection, Core, CoreDal}; +use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_types::{L1BatchNumber, L2BlockNumber, H256}; use super::PendingBatchData; diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index 5810061af19a..f3b3f6e0fb4b 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -6,7 +6,6 @@ use std::{collections::HashMap, ops}; use futures::FutureExt; -use vm_utils::storage::L1BatchParamsProvider; use zksync_config::GenesisConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, Core}; @@ -19,6 +18,7 @@ use zksync_types::{ block::L2BlockHasher, fee::TransactionExecutionMetrics, protocol_version::ProtocolSemanticVersion, L2ChainId, ProtocolVersion, ProtocolVersionId, }; +use zksync_vm_utils::storage::L1BatchParamsProvider; use super::*; @@ -102,7 +102,8 @@ async fn waiting_for_l1_batch_params_with_genesis() { .await .unwrap(); - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut provider = L1BatchParamsProvider::new(); + provider.initialize(&mut storage).await.unwrap(); let (hash, timestamp) = provider .wait_for_l1_batch_params(&mut storage, L1BatchNumber(0)) .await @@ -141,7 +142,8 @@ async fn waiting_for_l1_batch_params_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut provider = L1BatchParamsProvider::new(); + provider.initialize(&mut storage).await.unwrap(); let (hash, timestamp) = provider .wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number) .await @@ -189,7 +191,8 @@ async fn getting_first_l2_block_in_batch_with_genesis() { .await .unwrap(); - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut provider = L1BatchParamsProvider::new(); + provider.initialize(&mut storage).await.unwrap(); let mut batches_and_l2_blocks = HashMap::from([ (L1BatchNumber(0), Ok(Some(L2BlockNumber(0)))), (L1BatchNumber(1), Ok(Some(L2BlockNumber(1)))), @@ -260,7 +263,8 @@ async fn getting_first_l2_block_in_batch_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), 
&[]).await; - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut provider = L1BatchParamsProvider::new(); + provider.initialize(&mut storage).await.unwrap(); let mut batches_and_l2_blocks = HashMap::from([ (L1BatchNumber(1), Err(())), (snapshot_recovery.l1_batch_number, Err(())), @@ -316,7 +320,8 @@ async fn loading_pending_batch_with_genesis() { ) .await; - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut provider = L1BatchParamsProvider::new(); + provider.initialize(&mut storage).await.unwrap(); let first_l2_block_in_batch = provider .load_first_l2_block_in_batch(&mut storage, L1BatchNumber(1)) .await @@ -397,7 +402,8 @@ async fn loading_pending_batch_after_snapshot_recovery() { ) .await; - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut provider = L1BatchParamsProvider::new(); + provider.initialize(&mut storage).await.unwrap(); let first_l2_block_in_batch = provider .load_first_l2_block_in_batch(&mut storage, snapshot_recovery.l1_batch_number + 1) .await @@ -459,7 +465,8 @@ async fn getting_batch_version_with_genesis() { .await .unwrap(); - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut provider = L1BatchParamsProvider::new(); + provider.initialize(&mut storage).await.unwrap(); let version = provider .load_l1_batch_protocol_version(&mut storage, L1BatchNumber(0)) .await @@ -498,7 +505,8 @@ async fn getting_batch_version_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut provider = L1BatchParamsProvider::new(); + provider.initialize(&mut storage).await.unwrap(); let version = provider .load_l1_batch_protocol_version(&mut storage, snapshot_recovery.l1_batch_number) .await diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index 38bcdaad193d..c3d8dc1dee4d 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -7,12 +7,11 @@ use std::{ use anyhow::Context as _; use async_trait::async_trait; -use multivm::{interface::Halt, utils::derive_base_fee_and_gas_per_pubdata}; -use vm_utils::storage::L1BatchParamsProvider; use zksync_config::configs::chain::StateKeeperConfig; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_mempool::L2TxFilter; +use zksync_multivm::{interface::Halt, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_types::{ protocol_upgrade::ProtocolUpgradeTx, utils::display_timestamp, Address, L1BatchNumber, @@ -20,6 +19,7 @@ use zksync_types::{ }; // TODO (SMA-1206): use seconds instead of milliseconds. 
use zksync_utils::time::millis_since_epoch; +use zksync_vm_utils::storage::L1BatchParamsProvider; use crate::{ io::{ @@ -90,6 +90,10 @@ impl StateKeeperIO for MempoolIO { async fn initialize(&mut self) -> anyhow::Result<(IoCursor, Option)> { let mut storage = self.pool.connection_tagged("state_keeper").await?; let cursor = IoCursor::new(&mut storage).await?; + self.l1_batch_params_provider + .initialize(&mut storage) + .await + .context("failed initializing L1 batch params provider")?; L2BlockSealProcess::clear_pending_l2_block(&mut storage, cursor.next_l2_block - 1).await?; @@ -416,7 +420,7 @@ async fn sleep_past(timestamp: u64, l2_block: L2BlockNumber) -> u64 { } impl MempoolIO { - pub async fn new( + pub fn new( mempool: MempoolGuard, batch_fee_input_provider: Arc, pool: ConnectionPool, @@ -425,12 +429,6 @@ impl MempoolIO { delay_interval: Duration, chain_id: L2ChainId, ) -> anyhow::Result { - let mut storage = pool.connection_tagged("state_keeper").await?; - let l1_batch_params_provider = L1BatchParamsProvider::new(&mut storage) - .await - .context("failed initializing L1 batch params provider")?; - drop(storage); - Ok(Self { mempool, pool, @@ -438,7 +436,7 @@ impl MempoolIO { l2_block_max_payload_size_sealer: L2BlockMaxPayloadSizeSealer::new(config), filter: L2TxFilter::default(), // ^ Will be initialized properly on the first newly opened batch - l1_batch_params_provider, + l1_batch_params_provider: L1BatchParamsProvider::new(), fee_account, validation_computational_gas_limit: config.validation_computational_gas_limit, max_allowed_tx_gas_limit: config.max_allowed_l2_tx_gas_limit.into(), diff --git a/core/node/state_keeper/src/io/mod.rs b/core/node/state_keeper/src/io/mod.rs index 80ba8e59e2b7..384b0f45b0f6 100644 --- a/core/node/state_keeper/src/io/mod.rs +++ b/core/node/state_keeper/src/io/mod.rs @@ -1,13 +1,13 @@ use std::{fmt, time::Duration}; use async_trait::async_trait; -use multivm::interface::{L1BatchEnv, SystemEnv}; -use vm_utils::storage::l1_batch_params; use zksync_contracts::BaseSystemContracts; +use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_types::{ block::L2BlockExecutionData, fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2ChainId, ProtocolVersionId, Transaction, H256, }; +use zksync_vm_utils::storage::l1_batch_params; pub use self::{ common::IoCursor, diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index c3da618fe76f..de9ac22e1777 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -351,8 +351,8 @@ mod tests { use assert_matches::assert_matches; use futures::FutureExt; - use multivm::zk_evm_latest::ethereum_types::{H256, U256}; use zksync_dal::CoreDal; + use zksync_multivm::zk_evm_latest::ethereum_types::{H256, U256}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_types::{ api::TransactionStatus, block::BlockGasCount, tx::ExecutionMetrics, diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index fabdc855fa47..03495c0d98b4 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -365,12 +365,12 @@ impl L2BlockSealSubtask for InsertL2ToL1LogsSubtask { #[cfg(test)] mod tests { - use multivm::{ + use zksync_dal::{ConnectionPool, Core}; + use zksync_multivm::{ 
utils::{get_max_batch_gas_limit, get_max_gas_per_pubdata_byte}, zk_evm_latest::ethereum_types::H256, VmVersion, }; - use zksync_dal::{ConnectionPool, Core}; use zksync_node_test_utils::create_l2_transaction; use zksync_types::{ block::L2BlockHeader, diff --git a/core/node/state_keeper/src/io/seal_logic/mod.rs b/core/node/state_keeper/src/io/seal_logic/mod.rs index 5aedb85b8131..92630015f2a2 100644 --- a/core/node/state_keeper/src/io/seal_logic/mod.rs +++ b/core/node/state_keeper/src/io/seal_logic/mod.rs @@ -8,8 +8,8 @@ use std::{ use anyhow::Context as _; use itertools::Itertools; -use multivm::utils::{get_max_batch_gas_limit, get_max_gas_per_pubdata_byte}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_multivm::utils::{get_max_batch_gas_limit, get_max_gas_per_pubdata_byte}; use zksync_shared_metrics::{BlockStage, L2BlockStage, APP_METRICS}; use zksync_types::{ block::{L1BatchHeader, L2BlockHeader}, @@ -22,8 +22,8 @@ use zksync_types::{ TransactionExecutionResult, }, utils::display_timestamp, - AccountTreeId, Address, ExecuteTransactionCommon, ProtocolVersionId, StorageKey, StorageLog, - Transaction, VmEvent, H256, + Address, ExecuteTransactionCommon, ProtocolVersionId, StorageKey, StorageLog, Transaction, + VmEvent, H256, }; use zksync_utils::u256_to_h256; @@ -185,47 +185,46 @@ impl UpdatesManager { } let progress = L1_BATCH_METRICS.start(L1BatchSealStage::FilterWrittenSlots); - let (initial_writes, all_writes_len): (Vec<_>, usize) = if let Some(state_diffs) = - &finished_batch.state_diffs - { - let all_writes_len = state_diffs.len(); - - ( - state_diffs + let (initial_writes, all_writes_len): (Vec<_>, usize) = + if let Some(state_diffs) = &finished_batch.state_diffs { + let all_writes_len = state_diffs.len(); + + ( + state_diffs + .iter() + .filter(|diff| diff.is_write_initial()) + .map(|diff| { + H256(StorageKey::raw_hashed_key( + &diff.address, + &u256_to_h256(diff.key), + )) + }) + .collect(), + all_writes_len, + ) + } else { + let deduplicated_writes_hashed_keys_iter = finished_batch + .final_execution_state + .deduplicated_storage_logs .iter() - .filter(|diff| diff.is_write_initial()) - .map(|diff| { - StorageKey::new(AccountTreeId::new(diff.address), u256_to_h256(diff.key)) - }) - .collect(), - all_writes_len, - ) - } else { - let deduplicated_writes = finished_batch - .final_execution_state - .deduplicated_storage_logs - .iter() - .filter(|log_query| log_query.is_write()); - - let deduplicated_writes_hashed_keys: Vec<_> = deduplicated_writes - .clone() - .map(|log| log.key.hashed_key()) - .collect(); - let all_writes_len = deduplicated_writes_hashed_keys.len(); - let non_initial_writes = transaction - .storage_logs_dedup_dal() - .filter_written_slots(&deduplicated_writes_hashed_keys) - .await?; - - ( - deduplicated_writes - .filter_map(|log| { - (!non_initial_writes.contains(&log.key.hashed_key())).then_some(log.key) - }) - .collect(), - all_writes_len, - ) - }; + .filter(|log| log.is_write()) + .map(|log| log.key.hashed_key()); + + let deduplicated_writes_hashed_keys: Vec<_> = + deduplicated_writes_hashed_keys_iter.clone().collect(); + let all_writes_len = deduplicated_writes_hashed_keys.len(); + let non_initial_writes = transaction + .storage_logs_dedup_dal() + .filter_written_slots(&deduplicated_writes_hashed_keys) + .await?; + + ( + deduplicated_writes_hashed_keys_iter + .filter(|hashed_key| !non_initial_writes.contains(hashed_key)) + .collect(), + all_writes_len, + ) + }; progress.observe(all_writes_len); let progress = 
L1_BATCH_METRICS.start(L1BatchSealStage::InsertInitialWrites); diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index ee0e39ed0618..7c70607c763b 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -1,10 +1,10 @@ use std::time::Duration; -use multivm::utils::derive_base_fee_and_gas_per_pubdata; use test_casing::test_casing; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_mempool::L2TxFilter; +use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; use zksync_node_test_utils::prepare_recovery_snapshot; use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, @@ -311,7 +311,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { // Keys that are only read must not be written to `storage_logs`. let account = AccountTreeId::default(); let read_key = StorageKey::new(account, H256::from_low_u64_be(1)); - assert!(!touched_slots.contains_key(&read_key)); + assert!(!touched_slots.contains_key(&read_key.hashed_key())); // The storage logs must be inserted and read in the correct order, so that // `touched_slots` contain the most recent values in the L1 batch. @@ -320,7 +320,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { for (key, value) in written_kvs { let key = StorageKey::new(account, H256::from_low_u64_be(key)); let expected_value = H256::from_low_u64_be(value); - assert_eq!(touched_slots[&key], expected_value); + assert_eq!(touched_slots[&key.hashed_key()], expected_value); } } diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index 84dfd4354b3c..28fcbd51822e 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -2,14 +2,15 @@ use std::{slice, sync::Arc, time::Duration}; -use multivm::vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT; +use zksync_base_token_adjuster::NoOpRatioProvider; use zksync_config::{ configs::{chain::StateKeeperConfig, eth_sender::PubdataSendingMode, wallets::Wallets}, GasAdjusterConfig, }; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_eth_client::clients::MockEthereum; +use zksync_eth_client::{clients::MockEthereum, BaseFees}; +use zksync_multivm::vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT; use zksync_node_fee_model::{l1_gas_price::GasAdjuster, MainNodeFeeInputProvider}; use zksync_node_genesis::create_genesis_l1_batch; use zksync_node_test_utils::{ @@ -47,9 +48,15 @@ impl Tester { } async fn create_gas_adjuster(&self) -> GasAdjuster { - let eth_client = MockEthereum::builder() - .with_fee_history(vec![0, 4, 6, 8, 7, 5, 5, 8, 10, 9]) - .build(); + let block_fees = vec![0, 4, 6, 8, 7, 5, 5, 8, 10, 9]; + let base_fees = block_fees + .into_iter() + .map(|base_fee_per_gas| BaseFees { + base_fee_per_gas, + base_fee_per_blob_gas: 1.into(), // Not relevant for the test + }) + .collect(); + let eth_client = MockEthereum::builder().with_fee_history(base_fees).build(); let gas_adjuster_config = GasAdjusterConfig { default_priority_fee_per_gas: 10, @@ -78,8 +85,10 @@ impl Tester { pub(super) async fn create_batch_fee_input_provider(&self) -> MainNodeFeeInputProvider { let gas_adjuster = Arc::new(self.create_gas_adjuster().await); + MainNodeFeeInputProvider::new( gas_adjuster, + Arc::new(NoOpRatioProvider::default()), FeeModelConfig::V1(FeeModelConfigV1 { minimal_l2_gas_price: 
self.minimal_l2_gas_price(), }), @@ -98,6 +107,7 @@ impl Tester { let gas_adjuster = Arc::new(self.create_gas_adjuster().await); let batch_fee_input_provider = MainNodeFeeInputProvider::new( gas_adjuster, + Arc::new(NoOpRatioProvider::default()), FeeModelConfig::V1(FeeModelConfigV1 { minimal_l2_gas_price: self.minimal_l2_gas_price(), }), @@ -119,7 +129,6 @@ impl Tester { Duration::from_secs(1), L2ChainId::from(270), ) - .await .unwrap(); (io, mempool) diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index 6d44dd247c4d..6c1718232a09 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -5,8 +5,8 @@ use std::{ }; use anyhow::Context as _; -use multivm::interface::{Halt, L1BatchEnv, SystemEnv}; use tokio::sync::watch; +use zksync_multivm::interface::{Halt, L1BatchEnv, SystemEnv}; use zksync_state::ReadStorageFactory; use zksync_types::{ block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, diff --git a/core/node/state_keeper/src/lib.rs b/core/node/state_keeper/src/lib.rs index 4920e2514b0a..1c12f7825486 100644 --- a/core/node/state_keeper/src/lib.rs +++ b/core/node/state_keeper/src/lib.rs @@ -63,7 +63,6 @@ pub async fn create_state_keeper( mempool_config.delay_interval(), l2chain_id, ) - .await .expect("Failed initializing main node I/O for state keeper"); let sealer = SequencerSealer::new(state_keeper_config); diff --git a/core/node/state_keeper/src/mempool_actor.rs b/core/node/state_keeper/src/mempool_actor.rs index 85a68069e00b..d79d9ebb34a8 100644 --- a/core/node/state_keeper/src/mempool_actor.rs +++ b/core/node/state_keeper/src/mempool_actor.rs @@ -1,13 +1,13 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; use anyhow::Context as _; -use multivm::utils::derive_base_fee_and_gas_per_pubdata; #[cfg(test)] use tokio::sync::mpsc; use tokio::sync::watch; use zksync_config::configs::chain::MempoolConfig; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_mempool::L2TxFilter; +use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; use zksync_node_fee_model::BatchFeeModelInputProvider; #[cfg(test)] use zksync_types::H256; diff --git a/core/node/state_keeper/src/metrics.rs b/core/node/state_keeper/src/metrics.rs index 66c6e7933e8e..c154719e3900 100644 --- a/core/node/state_keeper/src/metrics.rs +++ b/core/node/state_keeper/src/metrics.rs @@ -5,12 +5,12 @@ use std::{ time::Duration, }; -use multivm::interface::{VmExecutionResultAndLogs, VmRevertReason}; use vise::{ Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LatencyObserver, Metrics, }; use zksync_mempool::MempoolStore; +use zksync_multivm::interface::{VmExecutionResultAndLogs, VmRevertReason}; use zksync_shared_metrics::InteractionType; use zksync_types::{tx::tx_execution_info::DeduplicatedWritesMetrics, ProtocolVersionId}; @@ -444,6 +444,7 @@ pub(super) enum ExecutorCommand { StartNextL2Block, RollbackLastTx, FinishBatch, + FinishBatchWithCache, } const GAS_PER_NANOSECOND_BUCKETS: Buckets = Buckets::values(&[ diff --git a/core/node/state_keeper/src/seal_criteria/criteria/gas_for_batch_tip.rs b/core/node/state_keeper/src/seal_criteria/criteria/gas_for_batch_tip.rs index 8c15d04d0833..69214406bea5 100644 --- a/core/node/state_keeper/src/seal_criteria/criteria/gas_for_batch_tip.rs +++ b/core/node/state_keeper/src/seal_criteria/criteria/gas_for_batch_tip.rs @@ -1,4 +1,4 @@ -use multivm::utils::gas_bootloader_batch_tip_overhead; +use 
zksync_multivm::utils::gas_bootloader_batch_tip_overhead; use zksync_types::ProtocolVersionId; use crate::seal_criteria::{ diff --git a/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs b/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs index 3e800f18e2d9..264618f5d136 100644 --- a/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs +++ b/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs @@ -1,7 +1,7 @@ -use multivm::utils::{ +use zksync_config::configs::chain::StateKeeperConfig; +use zksync_multivm::utils::{ circuit_statistics_bootloader_batch_tip_overhead, get_max_batch_base_layer_circuits, }; -use zksync_config::configs::chain::StateKeeperConfig; use zksync_types::ProtocolVersionId; // Local uses diff --git a/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs b/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs index e021cc127be7..f575a905891c 100644 --- a/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs +++ b/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs @@ -1,4 +1,4 @@ -use multivm::utils::execution_metrics_bootloader_batch_tip_overhead; +use zksync_multivm::utils::execution_metrics_bootloader_batch_tip_overhead; use zksync_types::ProtocolVersionId; use crate::seal_criteria::{ diff --git a/core/node/state_keeper/src/seal_criteria/criteria/slots.rs b/core/node/state_keeper/src/seal_criteria/criteria/slots.rs index 6178f9e824d8..81b3a0933801 100644 --- a/core/node/state_keeper/src/seal_criteria/criteria/slots.rs +++ b/core/node/state_keeper/src/seal_criteria/criteria/slots.rs @@ -1,4 +1,4 @@ -use multivm::utils::get_bootloader_max_txs_in_batch; +use zksync_multivm::utils::get_bootloader_max_txs_in_batch; use zksync_types::ProtocolVersionId; use crate::seal_criteria::{SealCriterion, SealData, SealResolution, StateKeeperConfig}; diff --git a/core/node/state_keeper/src/seal_criteria/criteria/tx_encoding_size.rs b/core/node/state_keeper/src/seal_criteria/criteria/tx_encoding_size.rs index 13a7f0b0a757..409673d6cac8 100644 --- a/core/node/state_keeper/src/seal_criteria/criteria/tx_encoding_size.rs +++ b/core/node/state_keeper/src/seal_criteria/criteria/tx_encoding_size.rs @@ -1,4 +1,4 @@ -use multivm::utils::get_bootloader_encoding_space; +use zksync_multivm::utils::get_bootloader_encoding_space; use zksync_types::ProtocolVersionId; use crate::seal_criteria::{ diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index ff231107326a..01be129dde6f 100644 --- a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -12,8 +12,8 @@ use std::fmt; -use multivm::{interface::Halt, vm_latest::TransactionVmExt}; use zksync_config::configs::chain::StateKeeperConfig; +use zksync_multivm::{interface::Halt, vm_latest::TransactionVmExt}; use zksync_types::{ block::BlockGasCount, fee::TransactionExecutionMetrics, diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 691f656d34cc..80514f51e8c3 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -4,18 +4,18 @@ use std::sync::Arc; use async_trait::async_trait; -use multivm::{ +use once_cell::sync::Lazy; +use tokio::sync::{mpsc, watch}; +use zksync_contracts::BaseSystemContracts; +use zksync_dal::{ConnectionPool, Core, CoreDal as _}; +use zksync_multivm::{ interface::{ 
CurrentExecutionState, ExecutionResult, FinishedL1Batch, L1BatchEnv, Refunds, SystemEnv, VmExecutionResultAndLogs, VmExecutionStatistics, }, vm_latest::VmExecutionLogs, }; -use once_cell::sync::Lazy; -use tokio::sync::{mpsc, watch}; -use zksync_contracts::BaseSystemContracts; -use zksync_dal::{ConnectionPool, Core, CoreDal as _}; -use zksync_state::ReadStorageFactory; +use zksync_state::{ReadStorageFactory, StorageViewCache}; use zksync_test_account::Account; use zksync_types::{ fee::Fee, utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, @@ -76,6 +76,10 @@ pub(crate) fn successful_exec() -> TxExecutionResult { } } +pub(crate) fn storage_view_cache() -> StorageViewCache { + StorageViewCache::default() +} + /// `BatchExecutor` which doesn't check anything at all. Accepts all transactions. #[derive(Debug)] pub struct MockBatchExecutor; @@ -102,6 +106,9 @@ impl BatchExecutor for MockBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); break; } + Command::FinishBatchWithCache(resp) => resp + .send((default_vm_batch_result(), storage_view_cache())) + .unwrap(), } } anyhow::Ok(()) @@ -139,7 +146,7 @@ pub async fn fund(pool: &ConnectionPool, addresses: &[Address]) { { storage .storage_logs_dedup_dal() - .insert_initial_writes(L1BatchNumber(0), &[storage_log.key]) + .insert_initial_writes(L1BatchNumber(0), &[storage_log.key.hashed_key()]) .await .unwrap(); } diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index 4539633174a8..1be84cfbf54e 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -13,12 +13,12 @@ use std::{ }; use async_trait::async_trait; -use multivm::{ +use tokio::sync::{mpsc, watch, watch::Receiver}; +use zksync_contracts::BaseSystemContracts; +use zksync_multivm::{ interface::{ExecutionResult, L1BatchEnv, SystemEnv, VmExecutionResultAndLogs}, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; -use tokio::sync::{mpsc, watch, watch::Receiver}; -use zksync_contracts::BaseSystemContracts; use zksync_node_test_utils::create_l2_transaction; use zksync_state::{PgOrRocksdbStorage, ReadStorageFactory}; use zksync_types::{ @@ -30,7 +30,9 @@ use crate::{ batch_executor::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}, io::{IoCursor, L1BatchParams, L2BlockParams, PendingBatchData, StateKeeperIO}, seal_criteria::{IoSealCriteria, SequencerSealer, UnexecutableReason}, - testonly::{default_vm_batch_result, successful_exec, BASE_SYSTEM_CONTRACTS}, + testonly::{ + default_vm_batch_result, storage_view_cache, successful_exec, BASE_SYSTEM_CONTRACTS, + }, types::ExecutionMetricsForCriteria, updates::UpdatesManager, OutputHandler, StateKeeperOutputHandler, ZkSyncStateKeeper, @@ -271,7 +273,7 @@ pub(crate) fn successful_exec_with_metrics( /// Creates a `TxExecutionResult` object denoting a tx that was rejected. 
pub(crate) fn rejected_exec() -> TxExecutionResult { TxExecutionResult::RejectedByVm { - reason: multivm::interface::Halt::InnerTxError, + reason: zksync_multivm::interface::Halt::InnerTxError, } } @@ -499,6 +501,9 @@ impl TestBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); return; } + Command::FinishBatchWithCache(resp) => resp + .send((default_vm_batch_result(), storage_view_cache())) + .unwrap(), } } } @@ -827,6 +832,9 @@ impl BatchExecutor for MockBatchExecutor { resp.send(default_vm_batch_result()).unwrap(); break; } + Command::FinishBatchWithCache(resp) => resp + .send((default_vm_batch_result(), storage_view_cache())) + .unwrap(), } } anyhow::Ok(()) diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index ee716df2e691..8bfc53c8f7b1 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -6,15 +6,15 @@ use std::{ time::Instant, }; -use multivm::{ +use tokio::sync::watch; +use zksync_config::configs::chain::StateKeeperConfig; +use zksync_multivm::{ interface::{ ExecutionResult, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmExecutionStatistics, }, vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, VmExecutionLogs}, }; -use tokio::sync::watch; -use zksync_config::configs::chain::StateKeeperConfig; use zksync_node_test_utils::create_l2_transaction; use zksync_types::{ aggregated_operations::AggregatedActionType, diff --git a/core/node/state_keeper/src/types.rs b/core/node/state_keeper/src/types.rs index 61548483dfd2..2606e7d5c7b7 100644 --- a/core/node/state_keeper/src/types.rs +++ b/core/node/state_keeper/src/types.rs @@ -3,9 +3,9 @@ use std::{ sync::{Arc, Mutex}, }; -use multivm::interface::VmExecutionResultAndLogs; use zksync_dal::{Connection, Core, CoreDal}; use zksync_mempool::{L2TxFilter, MempoolInfo, MempoolStore}; +use zksync_multivm::interface::VmExecutionResultAndLogs; use zksync_types::{ block::BlockGasCount, tx::ExecutionMetrics, Address, Nonce, PriorityOpId, Transaction, }; diff --git a/core/node/state_keeper/src/updates/l1_batch_updates.rs b/core/node/state_keeper/src/updates/l1_batch_updates.rs index 0670b06db7d7..7bc2095ff9b1 100644 --- a/core/node/state_keeper/src/updates/l1_batch_updates.rs +++ b/core/node/state_keeper/src/updates/l1_batch_updates.rs @@ -1,4 +1,4 @@ -use multivm::interface::FinishedL1Batch; +use zksync_multivm::interface::FinishedL1Batch; use zksync_types::{ block::BlockGasCount, priority_op_onchain_data::PriorityOpOnchainData, @@ -51,7 +51,7 @@ impl L1BatchUpdates { #[cfg(test)] mod tests { - use multivm::vm_latest::TransactionVmExt; + use zksync_multivm::vm_latest::TransactionVmExt; use zksync_types::{L2BlockNumber, ProtocolVersionId, H256}; use super::*; diff --git a/core/node/state_keeper/src/updates/l2_block_updates.rs b/core/node/state_keeper/src/updates/l2_block_updates.rs index 93e0a481ebc3..8b3060babad1 100644 --- a/core/node/state_keeper/src/updates/l2_block_updates.rs +++ b/core/node/state_keeper/src/updates/l2_block_updates.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use multivm::{ +use zksync_multivm::{ interface::{ExecutionResult, L2BlockEnv, VmExecutionResultAndLogs}, vm_latest::TransactionVmExt, }; @@ -181,7 +181,7 @@ impl L2BlockUpdates { #[cfg(test)] mod tests { - use multivm::vm_latest::TransactionVmExt; + use zksync_multivm::vm_latest::TransactionVmExt; use super::*; use crate::tests::{create_execution_result, create_transaction}; diff --git 
a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index c78607147468..e05432c57b21 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -1,8 +1,9 @@ -use multivm::{ +use zksync_contracts::BaseSystemContractsHashes; +use zksync_multivm::{ interface::{FinishedL1Batch, L1BatchEnv, SystemEnv, VmExecutionResultAndLogs}, utils::get_batch_base_fee, }; -use zksync_contracts::BaseSystemContractsHashes; +use zksync_state::StorageViewCache; use zksync_types::{ block::BlockGasCount, fee_model::BatchFeeInput, storage_writes_deduplicator::StorageWritesDeduplicator, @@ -35,6 +36,7 @@ pub struct UpdatesManager { base_fee_per_gas: u64, base_system_contract_hashes: BaseSystemContractsHashes, protocol_version: ProtocolVersionId, + storage_view_cache: Option, pub l1_batch: L1BatchUpdates, pub l2_block: L2BlockUpdates, pub storage_writes_deduplicator: StorageWritesDeduplicator, @@ -59,6 +61,7 @@ impl UpdatesManager { protocol_version, ), storage_writes_deduplicator: StorageWritesDeduplicator::new(), + storage_view_cache: None, } } @@ -66,7 +69,7 @@ impl UpdatesManager { self.batch_timestamp } - pub(crate) fn base_system_contract_hashes(&self) -> BaseSystemContractsHashes { + pub fn base_system_contract_hashes(&self) -> BaseSystemContractsHashes { self.base_system_contract_hashes } @@ -98,7 +101,7 @@ impl UpdatesManager { } } - pub(crate) fn protocol_version(&self) -> ProtocolVersionId { + pub fn protocol_version(&self) -> ProtocolVersionId { self.protocol_version } @@ -153,6 +156,14 @@ impl UpdatesManager { latency.observe(); } + pub fn update_storage_view_cache(&mut self, storage_view_cache: StorageViewCache) { + self.storage_view_cache = Some(storage_view_cache); + } + + pub fn storage_view_cache(&self) -> Option { + self.storage_view_cache.clone() + } + /// Pushes a new L2 block with the specified timestamp into this manager. The previously /// held L2 block is considered sealed and is used to extend the L1 batch data. 
pub fn push_l2_block(&mut self, l2_block_params: L2BlockParams) { diff --git a/core/node/tee_verifier_input_producer/Cargo.toml b/core/node/tee_verifier_input_producer/Cargo.toml index 208e7e35760c..c975bbcd280a 100644 --- a/core/node/tee_verifier_input_producer/Cargo.toml +++ b/core/node/tee_verifier_input_producer/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_tee_verifier_input_producer" -version = "0.1.0" +description = "ZKsync TEE verifier input producer" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -17,7 +18,7 @@ zksync_queued_job_processor.workspace = true zksync_tee_verifier.workspace = true zksync_types.workspace = true zksync_utils.workspace = true -vm_utils.workspace = true +zksync_vm_utils.workspace = true vise.workspace = true anyhow.workspace = true diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs index 9104b62fa5e5..0cd28ee5ce79 100644 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ b/core/node/tee_verifier_input_producer/src/lib.rs @@ -12,14 +12,16 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context; use async_trait::async_trait; use tokio::task::JoinHandle; -use vm_utils::storage::L1BatchParamsProvider; use zksync_dal::{tee_verifier_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool, Core, CoreDal}; use zksync_object_store::ObjectStore; -use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; +use zksync_prover_interface::inputs::{ + TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths, +}; use zksync_queued_job_processor::JobProcessor; -use zksync_tee_verifier::TeeVerifierInput; +use zksync_tee_verifier::Verify; use zksync_types::{L1BatchNumber, L2ChainId}; use zksync_utils::u256_to_h256; +use zksync_vm_utils::storage::L1BatchParamsProvider; use self::metrics::METRICS; @@ -53,7 +55,7 @@ impl TeeVerifierInputProducer { object_store: Arc, l2_chain_id: L2ChainId, ) -> anyhow::Result { - let prepare_basic_circuits_job: PrepareBasicCircuitsJob = object_store + let prepare_basic_circuits_job: WitnessInputMerklePaths = object_store .get(l1_batch_number) .await .context("failed to get PrepareBasicCircuitsJob from object store")?; @@ -75,7 +77,9 @@ impl TeeVerifierInputProducer { .with_context(|| format!("header is missing for L1 batch #{l1_batch_number}"))? 
.unwrap(); - let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) + let mut l1_batch_params_provider = L1BatchParamsProvider::new(); + l1_batch_params_provider + .initialize(&mut connection) .await .context("failed initializing L1 batch params provider")?; @@ -128,7 +132,7 @@ impl TeeVerifierInputProducer { tracing::info!("Started execution of l1_batch: {l1_batch_number:?}"); - let tee_verifier_input = TeeVerifierInput::new( + let tee_verifier_input = V1TeeVerifierInput::new( prepare_basic_circuits_job, l2_blocks_execution_data, l1_batch_env, @@ -149,7+153,7 @@ impl TeeVerifierInputProducer { l1_batch_number.0 ); - Ok(tee_verifier_input) + Ok(TeeVerifierInput::new(tee_verifier_input)) } } @@ -214,15 +218,13 @@ impl JobProcessor for TeeVerifierInputProducer { started_at: Instant, artifacts: Self::JobArtifacts, ) -> anyhow::Result<()> { - let upload_started_at = Instant::now(); + let observer: vise::LatencyObserver = METRICS.upload_input_time.start(); let object_path = self .object_store .put(job_id, &artifacts) .await .context("failed to upload artifacts for TeeVerifierInputProducer")?; - METRICS - .upload_input_time - .observe(upload_started_at.elapsed()); + observer.observe(); let mut connection = self .connection_pool .connection() .await @@ -245,7 +247,7 @@ impl JobProcessor for TeeVerifierInputProducer { .commit() .await .context("failed to commit DB transaction for TeeVerifierInputProducer")?; - METRICS.block_number_processed.set(job_id.0 as i64); + METRICS.block_number_processed.set(job_id.0 as u64); Ok(()) } diff --git a/core/node/tee_verifier_input_producer/src/metrics.rs b/core/node/tee_verifier_input_producer/src/metrics.rs index 51daa20baadb..362804d338e9 100644 --- a/core/node/tee_verifier_input_producer/src/metrics.rs +++ b/core/node/tee_verifier_input_producer/src/metrics.rs @@ -11,7 +11,7 @@ pub(crate) struct TeeVerifierInputProducerMetrics { pub process_batch_time: Histogram<Duration>, #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] pub upload_input_time: Histogram<Duration>, - pub block_number_processed: Gauge<i64>, + pub block_number_processed: Gauge<u64>, } #[vise::register] diff --git a/core/node/test_utils/Cargo.toml b/core/node/test_utils/Cargo.toml index da23ac917571..af60008df570 100644 --- a/core/node/test_utils/Cargo.toml +++ b/core/node/test_utils/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "zksync_node_test_utils" +description = "ZKsync utilities for writing tests" version.workspace = true edition.workspace = true authors.workspace = true @@ -10,7 +11,7 @@ keywords.workspace = true categories.workspace = true [dependencies] -multivm.workspace = true +zksync_multivm.workspace = true zksync_types.workspace = true zksync_dal.workspace = true zksync_contracts.workspace = true diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index a77e0aea2c0c..ee3503322aea 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -2,10 +2,10 @@ use std::collections::HashMap; -use multivm::utils::get_max_gas_per_pubdata_byte; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{Connection, Core, CoreDal}; use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction}; +use zksync_multivm::utils::get_max_gas_per_pubdata_byte; use zksync_node_genesis::GenesisParams; use zksync_system_constants::{get_intrinsic_constants, ZKPORTER_IS_AVAILABLE}; use zksync_types::{ @@ -18,7 +18,7 @@ use zksync_types::{ fee_model::BatchFeeInput, l2::L2Tx, protocol_version::ProtocolSemanticVersion, - snapshots::SnapshotRecoveryStatus, +
snapshots::{SnapshotRecoveryStatus, SnapshotStorageLog}, transaction_request::PaymasterParams, tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, Address, K256PrivateKey, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersion, @@ -154,16 +154,16 @@ pub fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { pub struct Snapshot { pub l1_batch: L1BatchHeader, pub l2_block: L2BlockHeader, - pub storage_logs: Vec, + pub storage_logs: Vec, pub factory_deps: HashMap>, } impl Snapshot { // Constructs a dummy Snapshot based on the provided values. - pub fn make( + pub fn new( l1_batch: L1BatchNumber, l2_block: L2BlockNumber, - storage_logs: &[StorageLog], + storage_logs: Vec, genesis_params: GenesisParams, ) -> Self { let contracts = genesis_params.base_system_contracts(); @@ -197,7 +197,7 @@ impl Snapshot { .into_iter() .map(|c| (c.hash, zksync_utils::be_words_to_bytes(&c.code))) .collect(), - storage_logs: storage_logs.to_vec(), + storage_logs, } } } @@ -209,11 +209,18 @@ pub async fn prepare_recovery_snapshot( l2_block: L2BlockNumber, storage_logs: &[StorageLog], ) -> SnapshotRecoveryStatus { - recover( - storage, - Snapshot::make(l1_batch, l2_block, storage_logs, GenesisParams::mock()), - ) - .await + let storage_logs = storage_logs + .iter() + .enumerate() + .map(|(i, log)| SnapshotStorageLog { + key: log.key.hashed_key(), + value: log.value, + l1_batch_number_of_initial_write: l1_batch, + enumeration_index: i as u64 + 1, + }) + .collect(); + let snapshot = Snapshot::new(l1_batch, l2_block, storage_logs, GenesisParams::mock()); + recover(storage, snapshot).await } /// Takes a storage snapshot at the last sealed L1 batch. @@ -248,10 +255,7 @@ pub async fn snapshot(storage: &mut Connection<'_, Core>) -> Snapshot { .snapshots_creator_dal() .get_storage_logs_chunk(l2_block, l1_batch.number, all_hashes) .await - .unwrap() - .into_iter() - .map(|l| StorageLog::new_write_log(l.key, l.value)) - .collect(), + .unwrap(), factory_deps: storage .snapshots_creator_dal() .get_all_factory_deps(l2_block) @@ -274,8 +278,10 @@ pub async fn recover( let tree_instructions: Vec<_> = snapshot .storage_logs .iter() - .enumerate() - .map(|(i, log)| TreeInstruction::write(log.key, i as u64 + 1, log.value)) + .map(|log| { + let tree_key = U256::from_little_endian(log.key.as_bytes()); + TreeInstruction::write(tree_key, log.enumeration_index, log.value) + }) .collect(); let l1_batch_root_hash = ZkSyncTree::process_genesis_batch(&tree_instructions).root_hash; @@ -317,7 +323,7 @@ pub async fn recover( .unwrap(); storage .storage_logs_dal() - .insert_storage_logs(snapshot.l2_block.number, &snapshot.storage_logs) + .insert_storage_logs_from_snapshot(snapshot.l2_block.number, &snapshot.storage_logs) .await .unwrap(); diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml index 5571bb7f3fde..3af52ed4688e 100644 --- a/core/node/vm_runner/Cargo.toml +++ b/core/node/vm_runner/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "zksync_vm_runner" +description = "ZKsync VM runner" version.workspace = true edition.workspace = true authors.workspace = true @@ -10,7 +11,7 @@ keywords.workspace = true categories.workspace = true [dependencies] -multivm.workspace = true +zksync_multivm.workspace = true zksync_types.workspace = true zksync_dal.workspace = true zksync_contracts.workspace = true @@ -18,7 +19,9 @@ zksync_state.workspace = true zksync_storage.workspace = true zksync_state_keeper.workspace = true zksync_utils.workspace = true 
-vm_utils.workspace = true +zksync_prover_interface.workspace = true +zksync_object_store.workspace = true +zksync_vm_utils.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs new file mode 100644 index 000000000000..7ab18397353d --- /dev/null +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -0,0 +1,393 @@ +use std::{collections::HashSet, sync::Arc}; + +use anyhow::anyhow; +use async_trait::async_trait; +use tokio::sync::watch; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_object_store::ObjectStore; +use zksync_prover_interface::inputs::VMRunWitnessInputData; +use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; +use zksync_types::{ + block::StorageOracleInfo, witness_block_state::WitnessStorageState, L1BatchNumber, L2ChainId, + H256, +}; +use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; + +use crate::{ + storage::StorageSyncTask, ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, + OutputHandlerFactory, VmRunner, VmRunnerIo, VmRunnerStorage, +}; + +/// A standalone component that retrieves all data needed for basic witness generation and saves it to the bucket. +#[derive(Debug)] +pub struct BasicWitnessInputProducer { + vm_runner: VmRunner, +} + +impl BasicWitnessInputProducer { + /// Creates a new BWIP from the provided DB parameters and window size, which + /// regulates how many batches this component can handle at the same time. + pub async fn new( + pool: ConnectionPool<Core>, + object_store: Arc<dyn ObjectStore>, + rocksdb_path: String, + chain_id: L2ChainId, + first_processed_batch: L1BatchNumber, + window_size: u32, + ) -> anyhow::Result<(Self, BasicWitnessInputProducerTasks)> { + let io = BasicWitnessInputProducerIo { + first_processed_batch, + window_size, + }; + let (loader, loader_task) = + VmRunnerStorage::new(pool.clone(), rocksdb_path, io.clone(), chain_id).await?; + let output_handler_factory = BasicWitnessInputProducerOutputHandlerFactory { + pool: pool.clone(), + object_store, + }; + let (output_handler_factory, output_handler_factory_task) = + ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), output_handler_factory); + let batch_processor = MainBatchExecutor::new(false, false); + let vm_runner = VmRunner::new( + pool, + Box::new(io), + Arc::new(loader), + Box::new(output_handler_factory), + Box::new(batch_processor), + ); + Ok(( + Self { vm_runner }, + BasicWitnessInputProducerTasks { + loader_task, + output_handler_factory_task, + }, + )) + } + + /// Continuously loads new available batches and writes the corresponding data + /// produced by that batch. + /// + /// # Errors + /// + /// Propagates RocksDB and Postgres errors. + pub async fn run(self, stop_receiver: &watch::Receiver<bool>) -> anyhow::Result<()> { + self.vm_runner.run(stop_receiver).await + } +} + +/// A collection of tasks that need to be run in order for BWIP to work as +/// intended. +#[derive(Debug)] +pub struct BasicWitnessInputProducerTasks { + /// Task that synchronizes storage with new available batches. + pub loader_task: StorageSyncTask<BasicWitnessInputProducerIo>, + /// Task that handles output from processed batches. + pub output_handler_factory_task: + ConcurrentOutputHandlerFactoryTask<BasicWitnessInputProducerIo>, +} + +/// IO implementation for the basic witness input producer.
+#[derive(Debug, Clone)] +pub struct BasicWitnessInputProducerIo { + first_processed_batch: L1BatchNumber, + window_size: u32, +} + +#[async_trait] +impl VmRunnerIo for BasicWitnessInputProducerIo { + fn name(&self) -> &'static str { + "basic_witness_input_producer" + } + + async fn latest_processed_batch( + &self, + conn: &mut Connection<'_, Core>, + ) -> anyhow::Result { + Ok(conn + .vm_runner_dal() + .get_bwip_latest_processed_batch() + .await? + .unwrap_or(self.first_processed_batch)) + } + + async fn last_ready_to_be_loaded_batch( + &self, + conn: &mut Connection<'_, Core>, + ) -> anyhow::Result { + Ok(conn + .vm_runner_dal() + .get_bwip_last_ready_batch(self.first_processed_batch, self.window_size) + .await?) + } + + async fn mark_l1_batch_as_processing( + &self, + conn: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + Ok(conn + .vm_runner_dal() + .mark_bwip_batch_as_processing(l1_batch_number) + .await?) + } + + async fn mark_l1_batch_as_completed( + &self, + conn: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + conn.vm_runner_dal() + .mark_bwip_batch_as_completed(l1_batch_number) + .await + } +} + +#[derive(Debug)] +struct BasicWitnessInputProducerOutputHandler { + pool: ConnectionPool, + object_store: Arc, +} + +#[async_trait] +impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { + async fn handle_l2_block(&mut self, _updates_manager: &UpdatesManager) -> anyhow::Result<()> { + Ok(()) + } + + async fn handle_l1_batch( + &mut self, + updates_manager: Arc, + ) -> anyhow::Result<()> { + let l1_batch_number = updates_manager.l1_batch.number; + let mut connection = self.pool.connection().await?; + + tracing::info!(%l1_batch_number, "Started saving VM run data"); + + let result = + get_updates_manager_witness_input_data(&mut connection, updates_manager).await?; + + assert_database_witness_input_data(&mut connection, l1_batch_number, &result).await; + + let blob_url = self.object_store.put(l1_batch_number, &result).await?; + + tracing::info!(%l1_batch_number, "Saved VM run data"); + + connection + .proof_generation_dal() + .insert_proof_generation_details(l1_batch_number) + .await?; + + connection + .proof_generation_dal() + .save_vm_runner_artifacts_metadata(l1_batch_number, &blob_url) + .await?; + + Ok(()) + } +} + +async fn get_updates_manager_witness_input_data( + connection: &mut Connection<'_, Core>, + updates_manager: Arc, +) -> anyhow::Result { + let l1_batch_number = updates_manager.l1_batch.number; + let finished_batch = updates_manager + .l1_batch + .finished + .clone() + .ok_or_else(|| anyhow!("L1 batch {l1_batch_number:?} is not finished"))?; + + let initial_heap_content = finished_batch.final_bootloader_memory.unwrap(); // might be just empty + let default_aa = updates_manager.base_system_contract_hashes().default_aa; + let bootloader = updates_manager.base_system_contract_hashes().bootloader; + let bootloader_code_bytes = connection + .factory_deps_dal() + .get_sealed_factory_dep(bootloader) + .await? + .ok_or_else(|| anyhow!("Failed fetching bootloader bytecode from DB"))?; + let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); + + let account_code_hash = h256_to_u256(default_aa); + let account_bytecode_bytes = connection + .factory_deps_dal() + .get_sealed_factory_dep(default_aa) + .await? 
+ .ok_or_else(|| anyhow!("Default account bytecode should exist"))?; + let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); + + let hashes: HashSet = finished_batch + .final_execution_state + .used_contract_hashes + .iter() + // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness` + .filter(|&&hash| hash != h256_to_u256(bootloader)) + .map(|hash| u256_to_h256(*hash)) + .collect(); + let mut used_bytecodes = connection + .factory_deps_dal() + .get_factory_deps(&hashes) + .await; + if finished_batch + .final_execution_state + .used_contract_hashes + .contains(&account_code_hash) + { + used_bytecodes.insert(account_code_hash, account_bytecode); + } + + let storage_refunds = finished_batch.final_execution_state.storage_refunds; + let pubdata_costs = finished_batch.final_execution_state.pubdata_costs; + + let storage_view_cache = updates_manager + .storage_view_cache() + .expect("Storage view cache was not initialized"); + + let witness_block_state = WitnessStorageState { + read_storage_key: storage_view_cache.read_storage_keys(), + is_write_initial: storage_view_cache.initial_writes(), + }; + + Ok(VMRunWitnessInputData { + l1_batch_number, + used_bytecodes, + initial_heap_content, + + protocol_version: updates_manager.protocol_version(), + + bootloader_code, + default_account_code_hash: account_code_hash, + storage_refunds, + pubdata_costs, + witness_block_state, + }) +} + +async fn assert_database_witness_input_data( + connection: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + result: &VMRunWitnessInputData, +) { + let block_header = connection + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await + .expect("Failed fetching L1 block from DB") + .expect("L1 block header should exist"); + + let initial_heap_content = connection + .blocks_dal() + .get_initial_bootloader_heap(l1_batch_number) + .await + .expect("Failed fetching initial heap content from DB") + .expect("Initial bootloader heap should exist"); + + let account_code_hash = h256_to_u256(block_header.base_system_contracts_hashes.default_aa); + let account_bytecode_bytes = connection + .factory_deps_dal() + .get_sealed_factory_dep(block_header.base_system_contracts_hashes.default_aa) + .await + .expect("Failed fetching default account bytecode from DB") + .expect("Default account bytecode should exist"); + let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); + + let hashes: HashSet = block_header + .used_contract_hashes + .iter() + // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness` + .filter(|&&hash| hash != h256_to_u256(block_header.base_system_contracts_hashes.bootloader)) + .map(|hash| u256_to_h256(*hash)) + .collect(); + let mut used_bytecodes = connection + .factory_deps_dal() + .get_factory_deps(&hashes) + .await; + if block_header + .used_contract_hashes + .contains(&account_code_hash) + { + used_bytecodes.insert(account_code_hash, account_bytecode); + } + + assert_eq!( + hashes.len(), + used_bytecodes.len(), + "{} factory deps are not found in DB", + hashes.len() - used_bytecodes.len() + ); + + let StorageOracleInfo { + storage_refunds, + pubdata_costs, + } = connection + .blocks_dal() + .get_storage_oracle_info(block_header.number) + .await + .expect("Failed fetching L1 block from DB") + .expect("Storage oracle info should exist"); + let pubdata_costs = pubdata_costs.unwrap(); + + let bootloader_code_bytes = connection + .factory_deps_dal() + 
.get_sealed_factory_dep(block_header.base_system_contracts_hashes.bootloader) + .await + .expect("Failed fetching bootloader bytecode from DB") + .expect("Bootloader bytecode should exist"); + let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); + + assert_eq!( + block_header.protocol_version.unwrap(), + result.protocol_version, + "Protocol version mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + block_header.protocol_version, + result.protocol_version + ); + assert_eq!( + used_bytecodes, result.used_bytecodes, + "Used bytecodes mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + used_bytecodes, result.used_bytecodes + ); + assert_eq!( + storage_refunds, result.storage_refunds, + "Storage refunds mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + storage_refunds, result.storage_refunds + ); + assert_eq!( + pubdata_costs, result.pubdata_costs, + "Pubdata costs mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + pubdata_costs, result.pubdata_costs + ); + assert_eq!( + initial_heap_content, result.initial_heap_content, + "Initial heap content mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + initial_heap_content, result.initial_heap_content + ); + assert_eq!( + bootloader_code, result.bootloader_code, + "Bootloader code mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + bootloader_code, result.bootloader_code + ); + assert_eq!( + account_code_hash, result.default_account_code_hash, + "Default account code hash mismatch in basic witness input producer: DB: {:?}, UpdatesManager: {:?}", + account_code_hash, result.default_account_code_hash + ); +} + +#[derive(Debug)] +struct BasicWitnessInputProducerOutputHandlerFactory { + pool: ConnectionPool, + object_store: Arc, +} + +#[async_trait] +impl OutputHandlerFactory for BasicWitnessInputProducerOutputHandlerFactory { + async fn create_handler( + &mut self, + _l1_batch_number: L1BatchNumber, + ) -> anyhow::Result> { + Ok(Box::new(BasicWitnessInputProducerOutputHandler { + pool: self.pool.clone(), + object_store: self.object_store.clone(), + })) + } +} diff --git a/core/node/vm_runner/src/impls/mod.rs b/core/node/vm_runner/src/impls/mod.rs index 70d01f6932ef..2d982730498a 100644 --- a/core/node/vm_runner/src/impls/mod.rs +++ b/core/node/vm_runner/src/impls/mod.rs @@ -1,3 +1,7 @@ +mod bwip; mod protective_reads; -pub use protective_reads::{ProtectiveReadsWriter, ProtectiveReadsWriterTasks}; +pub use bwip::{ + BasicWitnessInputProducer, BasicWitnessInputProducerIo, BasicWitnessInputProducerTasks, +}; +pub use protective_reads::{ProtectiveReadsIo, ProtectiveReadsWriter, ProtectiveReadsWriterTasks}; diff --git a/core/node/vm_runner/src/impls/protective_reads.rs b/core/node/vm_runner/src/impls/protective_reads.rs index 6a8d85e3bd49..3be37b77d114 100644 --- a/core/node/vm_runner/src/impls/protective_reads.rs +++ b/core/node/vm_runner/src/impls/protective_reads.rs @@ -75,6 +75,7 @@ pub struct ProtectiveReadsWriterTasks { pub output_handler_factory_task: ConcurrentOutputHandlerFactoryTask, } +/// `VmRunnerIo` implementation for protective reads. #[derive(Debug, Clone)] pub struct ProtectiveReadsIo { first_processed_batch: L1BatchNumber, @@ -93,8 +94,9 @@ impl VmRunnerIo for ProtectiveReadsIo { ) -> anyhow::Result { Ok(conn .vm_runner_dal() - .get_protective_reads_latest_processed_batch(self.first_processed_batch) - .await?) + .get_protective_reads_latest_processed_batch() + .await? 
+ .unwrap_or(self.first_processed_batch)) } async fn last_ready_to_be_loaded_batch( @@ -107,16 +109,26 @@ impl VmRunnerIo for ProtectiveReadsIo { .await?) } - async fn mark_l1_batch_as_completed( + async fn mark_l1_batch_as_processing( &self, conn: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, ) -> anyhow::Result<()> { Ok(conn .vm_runner_dal() - .mark_protective_reads_batch_as_completed(l1_batch_number) + .mark_protective_reads_batch_as_processing(l1_batch_number) .await?) } + + async fn mark_l1_batch_as_completed( + &self, + conn: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + conn.vm_runner_dal() + .mark_protective_reads_batch_as_completed(l1_batch_number) + .await + } } #[derive(Debug)] @@ -139,7 +151,7 @@ impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { .finished .as_ref() .context("L1 batch is not actually finished")?; - let (_, protective_reads): (Vec, Vec) = finished_batch + let (_, computed_protective_reads): (Vec, Vec) = finished_batch .final_execution_state .deduplicated_storage_logs .iter() @@ -149,30 +161,48 @@ impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { .pool .connection_tagged("protective_reads_writer") .await?; - let mut expected_protective_reads = connection + let mut written_protective_reads = connection .storage_logs_dedup_dal() .get_protective_reads_for_l1_batch(updates_manager.l1_batch.number) .await?; - for protective_read in protective_reads { - let address = protective_read.key.address(); - let key = protective_read.key.key(); - if !expected_protective_reads.remove(&protective_read.key) { + if !written_protective_reads.is_empty() { + tracing::debug!( + l1_batch_number = %updates_manager.l1_batch.number, + "Protective reads have already been written, validating" + ); + for protective_read in computed_protective_reads { + let address = protective_read.key.address(); + let key = protective_read.key.key(); + if !written_protective_reads.remove(&protective_read.key) { + tracing::error!( + l1_batch_number = %updates_manager.l1_batch.number, + address = %address, + key = %key, + "VM runner produced a protective read that did not happen in state keeper" + ); + } + } + for remaining_read in written_protective_reads { tracing::error!( l1_batch_number = %updates_manager.l1_batch.number, - address = %address, - key = %key, - "VM runner produced a protective read that did not happen in state keeper" + address = %remaining_read.address(), + key = %remaining_read.key(), + "State keeper produced a protective read that did not happen in VM runner" ); } - } - for remaining_read in expected_protective_reads { - tracing::error!( + } else { + tracing::debug!( l1_batch_number = %updates_manager.l1_batch.number, - address = %remaining_read.address(), - key = %remaining_read.key(), - "State keeper produced a protective read that did not happen in VM runner" + "Protective reads have not been written, writing" ); + connection + .storage_logs_dedup_dal() + .insert_protective_reads( + updates_manager.l1_batch.number, + &computed_protective_reads, + ) + .await?; } Ok(()) diff --git a/core/node/vm_runner/src/io.rs b/core/node/vm_runner/src/io.rs index e67da0e8235c..2e118f6cfd13 100644 --- a/core/node/vm_runner/src/io.rs +++ b/core/node/vm_runner/src/io.rs @@ -31,6 +31,18 @@ pub trait VmRunnerIo: Debug + Send + Sync + 'static { conn: &mut Connection<'_, Core>, ) -> anyhow::Result; + /// Marks the specified batch as being in progress. Must be called before a batch can be marked + /// as completed. 
+ /// + /// # Errors + /// + /// Propagates DB errors. + async fn mark_l1_batch_as_processing( + &self, + conn: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()>; + /// Marks the specified batch as the latest completed batch. All earlier batches are considered /// to be completed too. No guarantees about later batches. /// diff --git a/core/node/vm_runner/src/lib.rs b/core/node/vm_runner/src/lib.rs index 50cf2a4433c1..b252eebcbb1f 100644 --- a/core/node/vm_runner/src/lib.rs +++ b/core/node/vm_runner/src/lib.rs @@ -13,7 +13,10 @@ mod metrics; #[cfg(test)] mod tests; -pub use impls::{ProtectiveReadsWriter, ProtectiveReadsWriterTasks}; +pub use impls::{ + BasicWitnessInputProducer, BasicWitnessInputProducerIo, BasicWitnessInputProducerTasks, + ProtectiveReadsIo, ProtectiveReadsWriter, ProtectiveReadsWriterTasks, +}; pub use io::VmRunnerIo; pub use output_handler::{ ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index 945d35477ce6..e84ec76d0726 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -1,9 +1,9 @@ use std::{sync::Arc, time::Duration}; use anyhow::Context; -use multivm::interface::L2BlockEnv; use tokio::{sync::watch, task::JoinHandle}; use zksync_dal::{ConnectionPool, Core}; +use zksync_multivm::interface::L2BlockEnv; use zksync_state_keeper::{ BatchExecutor, BatchExecutorHandle, ExecutionMetricsForCriteria, L2BlockParams, StateKeeperOutputHandler, TxExecutionResult, UpdatesManager, @@ -110,11 +110,15 @@ impl VmRunner { .await .context("VM runner failed to handle L2 block")?; } - let finished_batch = batch_executor - .finish_batch() + + let (finished_batch, storage_view_cache) = batch_executor + .finish_batch_with_cache() .await - .context("failed finishing L1 batch in executor")?; + .context("Failed getting storage view cache")?; updates_manager.finish_batch(finished_batch); + // This is needed so that the Basic Witness Input Producer can use in-memory reads instead of database queries + updates_manager.update_storage_view_cache(storage_view_cache); + latency.observe(); output_handler .handle_l1_batch(Arc::new(updates_manager)) @@ -194,6 +198,9 @@ impl VmRunner { .create_handler(next_batch) .await?; + self.io + .mark_l1_batch_as_processing(&mut self.pool.connection().await?, next_batch) + .await?; let handle = tokio::task::spawn(Self::process_batch( batch_executor, batch_data.l2_blocks, diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index 7f4de2725e4a..f3e304d7d4ff 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -7,15 +7,15 @@ use std::{ use anyhow::Context as _; use async_trait::async_trait; -use multivm::{interface::L1BatchEnv, vm_1_4_2::SystemEnv}; use tokio::sync::{watch, RwLock}; -use vm_utils::storage::L1BatchParamsProvider; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_multivm::{interface::L1BatchEnv, vm_1_4_2::SystemEnv}; use zksync_state::{ AsyncCatchupTask, BatchDiff, PgOrRocksdbStorage, ReadStorageFactory, RocksdbCell, RocksdbStorage, RocksdbStorageBuilder, RocksdbWithMemory, }; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2ChainId}; +use zksync_vm_utils::storage::L1BatchParamsProvider; use crate::{metrics::METRICS, VmRunnerIo}; @@ -101,7 +101,9 @@ impl<Io: VmRunnerIo> VmRunnerStorage<Io> { chain_id: L2ChainId, ) -> anyhow::Result<(Self, StorageSyncTask<Io>)> { let mut conn =
pool.connection_tagged(io.name()).await?; - let l1_batch_params_provider = L1BatchParamsProvider::new(&mut conn) + let mut l1_batch_params_provider = L1BatchParamsProvider::new(); + l1_batch_params_provider + .initialize(&mut conn) .await .context("Failed initializing L1 batch params provider")?; drop(conn); @@ -246,7 +248,9 @@ impl StorageSyncTask { state: Arc>, ) -> anyhow::Result { let mut conn = pool.connection_tagged(io.name()).await?; - let l1_batch_params_provider = L1BatchParamsProvider::new(&mut conn) + let mut l1_batch_params_provider = L1BatchParamsProvider::new(); + l1_batch_params_provider + .initialize(&mut conn) .await .context("Failed initializing L1 batch params provider")?; let target_l1_batch_number = io.latest_processed_batch(&mut conn).await?; diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 52c4db4bb486..50acba610ba5 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -55,6 +55,14 @@ impl VmRunnerIo for Arc> { Ok(io.current + io.max) } + async fn mark_l1_batch_as_processing( + &self, + _conn: &mut Connection<'_, Core>, + _l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + Ok(()) + } + async fn mark_l1_batch_as_completed( &self, _conn: &mut Connection<'_, Core>, @@ -233,7 +241,7 @@ async fn store_l1_batches( for _ in 0..10 { let key = StorageKey::new(AccountTreeId::new(H160::random()), H256::random()); let value = StorageValue::random(); - written_keys.push(key); + written_keys.push(key.hashed_key()); logs.push(StorageLog { kind: StorageLogKind::RepeatedWrite, key, @@ -354,7 +362,7 @@ async fn fund(pool: &ConnectionPool, accounts: &[Account]) { .is_empty() { conn.storage_logs_dedup_dal() - .insert_initial_writes(L1BatchNumber(0), &[storage_log.key]) + .insert_initial_writes(L1BatchNumber(0), &[storage_log.key.hashed_key()]) .await .unwrap(); } diff --git a/core/node/vm_runner/src/tests/output_handler.rs b/core/node/vm_runner/src/tests/output_handler.rs index 97ea59db63b0..453507328c4f 100644 --- a/core/node/vm_runner/src/tests/output_handler.rs +++ b/core/node/vm_runner/src/tests/output_handler.rs @@ -1,12 +1,12 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; -use multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use tokio::{ sync::{watch, RwLock}, task::JoinHandle, }; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::{ConnectionPool, Core}; +use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use zksync_state_keeper::UpdatesManager; use zksync_types::L1BatchNumber; diff --git a/core/node/vm_runner/src/tests/storage.rs b/core/node/vm_runner/src/tests/storage.rs index afeaac8a8364..52de43801ff0 100644 --- a/core/node/vm_runner/src/tests/storage.rs +++ b/core/node/vm_runner/src/tests/storage.rs @@ -338,8 +338,10 @@ async fn access_vm_runner_storage() -> anyhow::Result<()> { })?; // Check that both storages have identical key-value pairs written in them for storage_log in &storage_logs { - let storage_key = - StorageKey::new(AccountTreeId::new(storage_log.address), storage_log.key); + let storage_key = StorageKey::new( + AccountTreeId::new(storage_log.address.unwrap()), + storage_log.key.unwrap(), + ); assert_eq!( pg_storage.read_value(&storage_key), vm_storage.read_value(&storage_key) diff --git a/core/tests/loadnext/Cargo.toml b/core/tests/loadnext/Cargo.toml index 0c8b005d5582..adb5c9eca429 100644 --- a/core/tests/loadnext/Cargo.toml +++ 
b/core/tests/loadnext/Cargo.toml @@ -19,8 +19,7 @@ zksync_eth_client.workspace = true zksync_config.workspace = true zksync_contracts.workspace = true zksync_system_constants.workspace = true -vlog.workspace = true -prometheus_exporter.workspace = true +zksync_vlog.workspace = true async-trait.workspace = true serde = { workspace = true, features = ["derive"] } diff --git a/core/tests/loadnext/src/account/pubsub_executor.rs b/core/tests/loadnext/src/account/pubsub_executor.rs index 246954a26a25..07f45b4ae972 100644 --- a/core/tests/loadnext/src/account/pubsub_executor.rs +++ b/core/tests/loadnext/src/account/pubsub_executor.rs @@ -3,7 +3,10 @@ use std::time::{Duration, Instant}; use futures::{stream, TryStreamExt}; use zksync_web3_decl::{ jsonrpsee::{ - core::client::{Subscription, SubscriptionClientT}, + core::{ + client::{Subscription, SubscriptionClientT}, + ClientError as RpcError, + }, rpc_params, ws_client::WsClientBuilder, }, @@ -86,7 +89,7 @@ impl AccountLifespan { { match resp { None => return Err(ClientError::OperationTimeout), - Some(Err(err)) => return Err(err.into()), + Some(Err(err)) => return Err(RpcError::ParseError(err).into()), _ => {} } } diff --git a/core/tests/loadnext/src/main.rs b/core/tests/loadnext/src/main.rs index 309dd7557687..7ba6e762ea26 100644 --- a/core/tests/loadnext/src/main.rs +++ b/core/tests/loadnext/src/main.rs @@ -12,16 +12,16 @@ use loadnext::{ executor::Executor, report_collector::LoadtestResult, }; -use prometheus_exporter::PrometheusExporterConfig; use tokio::sync::watch; use zksync_config::configs::api::PrometheusConfig; +use zksync_vlog::prometheus::PrometheusExporterConfig; #[tokio::main] async fn main() -> anyhow::Result<()> { // We don't want to introduce dependency on `zksync_env_config` in loadnext, // but we historically rely on the environment variables for the observability configuration, // so we load them directly here. 
- let log_format: vlog::LogFormat = std::env::var("MISC_LOG_FORMAT") + let log_format: zksync_vlog::LogFormat = std::env::var("MISC_LOG_FORMAT") .ok() .unwrap_or("plain".to_string()) .parse()?; @@ -39,7 +39,7 @@ async fn main() -> anyhow::Result<()> { } }; - let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(log_format); if let Some(sentry_url) = sentry_url { builder = builder .with_sentry_url(&sentry_url) @@ -62,16 +62,15 @@ async fn main() -> anyhow::Result<()> { let mut executor = Executor::new(config, execution_config).await?; let (stop_sender, stop_receiver) = watch::channel(false); - if let Some(prometheus_config) = prometheus_config { - let exporter_config = PrometheusExporterConfig::push( - prometheus_config.gateway_endpoint(), - prometheus_config.push_interval(), - ); - - tracing::info!("Starting prometheus exporter with config {prometheus_config:?}"); - tokio::spawn(exporter_config.run(stop_receiver)); - } else { - tracing::info!("Starting without prometheus exporter"); + match prometheus_config.map(|c| (c.gateway_endpoint(), c.push_interval())) { + Some((Some(gateway_endpoint), push_interval)) => { + tracing::info!("Starting prometheus exporter with gateway {gateway_endpoint:?} and push_interval {push_interval:?}"); + let exporter_config = PrometheusExporterConfig::push(gateway_endpoint, push_interval); + tokio::spawn(exporter_config.run(stop_receiver)); + } + _ => { + tracing::info!("Starting without prometheus exporter"); + } } let result = executor.start().await; diff --git a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs index af621249ed8b..3b4c7a5eb53f 100644 --- a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs +++ b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs @@ -155,7 +155,7 @@ where ); self.wallet .provider - .estimate_fee(l2_tx.into()) + .estimate_fee(l2_tx.into(), None) .await .map_err(Into::into) } diff --git a/core/tests/loadnext/src/sdk/operations/execute_contract.rs b/core/tests/loadnext/src/sdk/operations/execute_contract.rs index 18b93008a73a..d5fe57c7b79f 100644 --- a/core/tests/loadnext/src/sdk/operations/execute_contract.rs +++ b/core/tests/loadnext/src/sdk/operations/execute_contract.rs @@ -155,7 +155,7 @@ where ); self.wallet .provider - .estimate_fee(execute.into()) + .estimate_fee(execute.into(), None) .await .map_err(Into::into) } diff --git a/core/tests/loadnext/src/sdk/operations/transfer.rs b/core/tests/loadnext/src/sdk/operations/transfer.rs index 34bab615c7c5..94ee3aeb6082 100644 --- a/core/tests/loadnext/src/sdk/operations/transfer.rs +++ b/core/tests/loadnext/src/sdk/operations/transfer.rs @@ -181,7 +181,7 @@ where }; self.wallet .provider - .estimate_fee(l2_tx.into()) + .estimate_fee(l2_tx.into(), None) .await .map_err(Into::into) } diff --git a/core/tests/loadnext/src/sdk/wallet.rs b/core/tests/loadnext/src/sdk/wallet.rs index c46431f70f48..9d3bd73a9bf2 100644 --- a/core/tests/loadnext/src/sdk/wallet.rs +++ b/core/tests/loadnext/src/sdk/wallet.rs @@ -96,7 +96,7 @@ where }; let bytes = self .provider - .call(req, Some(BlockIdVariant::BlockNumber(block_number))) + .call(req, Some(BlockIdVariant::BlockNumber(block_number)), None) .await?; if bytes.0.len() == 32 { U256::from_big_endian(&bytes.0) diff --git a/core/tests/recovery-test/package.json b/core/tests/recovery-test/package.json index adbbd1212696..8b2ea7f054c0 100644 --- 
a/core/tests/recovery-test/package.json +++ b/core/tests/recovery-test/package.json @@ -23,11 +23,13 @@ "@types/node": "^18.19.15", "@types/node-fetch": "^2.5.7", "chai": "^4.3.4", + "ethers": "^6.7.1", "mocha": "^9.0.2", "mocha-steps": "^1.3.0", "node-fetch": "^2.6.1", "protobufjs": "^7.2.5", "ts-node": "^10.1.0", - "typescript": "^4.3.5" + "typescript": "^4.3.5", + "zksync-ethers": "^6.9.0" } } diff --git a/core/tests/recovery-test/src/index.ts b/core/tests/recovery-test/src/index.ts index ca11a0d3b4cd..9e30a6d7831e 100644 --- a/core/tests/recovery-test/src/index.ts +++ b/core/tests/recovery-test/src/index.ts @@ -7,7 +7,7 @@ import fetch, { FetchError } from 'node-fetch'; import { promisify } from 'node:util'; import { ChildProcess, exec, spawn } from 'node:child_process'; import * as zksync from 'zksync-ethers'; -import { ethers } from 'ethers'; +import * as ethers from 'ethers'; import path from 'node:path'; import { expect } from 'chai'; @@ -200,11 +200,13 @@ async function waitForProcess(childProcess: ChildProcess, checkExitCode: boolean * Funded wallet wrapper that can be used to generate L1 batches. */ export class FundedWallet { - static async create(mainNode: zksync.Provider, eth: ethers.providers.Provider): Promise<FundedWallet> { + static async create(mainNode: zksync.Provider, eth: ethers.Provider): Promise<FundedWallet> { const testConfigPath = path.join(process.env.ZKSYNC_HOME!, `etc/test_config/constant/eth.json`); const ethTestConfig = JSON.parse(await fs.readFile(testConfigPath, { encoding: 'utf-8' })); - const mnemonic = ethTestConfig.test_mnemonic as string; - const wallet = zksync.Wallet.fromMnemonic(mnemonic, "m/44'/60'/0'/0/0").connect(mainNode).connectToL1(eth); + const mnemonic = ethers.Mnemonic.fromPhrase(ethTestConfig.test_mnemonic); + const walletHD = ethers.HDNodeWallet.fromMnemonic(mnemonic, "m/44'/60'/0'/0/0"); + const wallet = new zksync.Wallet(walletHD.privateKey, mainNode, eth); + return new FundedWallet(wallet); } @@ -213,14 +215,14 @@ export class FundedWallet { /** Ensure that this wallet is funded on L2, depositing funds from L1 if necessary. */ async ensureIsFunded() { const balance = await this.wallet.getBalance(); - const minExpectedBalance = ethers.utils.parseEther('0.001'); - if (balance.gte(minExpectedBalance)) { + const minExpectedBalance = ethers.parseEther('0.001'); + if (balance >= minExpectedBalance) { console.log('Wallet has acceptable balance on L2', balance); return; } const l1Balance = await this.wallet.getBalanceL1(); - expect(l1Balance.gte(minExpectedBalance), 'L1 balance of funded wallet is too small').to.be.true; + expect(l1Balance >= minExpectedBalance, 'L1 balance of funded wallet is too small').to.be.true; const baseTokenAddress = await this.wallet.getBaseToken(); const isETHBasedChain = baseTokenAddress == zksync.utils.ETH_ADDRESS_IN_CONTRACTS; diff --git a/core/tests/recovery-test/tests/genesis-recovery.test.ts b/core/tests/recovery-test/tests/genesis-recovery.test.ts index ebcf2b5a7e89..8ba9fc2fc79e 100644 --- a/core/tests/recovery-test/tests/genesis-recovery.test.ts +++ b/core/tests/recovery-test/tests/genesis-recovery.test.ts @@ -56,7 +56,7 @@ describe('genesis recovery', () => { before('create test wallet', async () => { const ethRpcUrl = process.env.ETH_CLIENT_WEB3_URL ??
'http://127.0.0.1:8545'; console.log(`Using L1 RPC at ${ethRpcUrl}`); - const eth = new ethers.providers.JsonRpcProvider(ethRpcUrl); + const eth = new ethers.JsonRpcProvider(ethRpcUrl); fundedWallet = await FundedWallet.create(mainNode, eth); }); diff --git a/core/tests/recovery-test/tests/snapshot-recovery.test.ts b/core/tests/recovery-test/tests/snapshot-recovery.test.ts index 3a5d3b7ef57c..f0bd1d83d432 100644 --- a/core/tests/recovery-test/tests/snapshot-recovery.test.ts +++ b/core/tests/recovery-test/tests/snapshot-recovery.test.ts @@ -22,6 +22,7 @@ interface AllSnapshotsResponse { } interface GetSnapshotResponse { + readonly version: number; readonly miniblockNumber: number; readonly l1BatchNumber: number; readonly storageLogsChunks: Array; @@ -100,7 +101,7 @@ describe('snapshot recovery', () => { before('create test wallet', async () => { const ethRpcUrl = process.env.ETH_CLIENT_WEB3_URL ?? 'http://127.0.0.1:8545'; console.log(`Using L1 RPC at ${ethRpcUrl}`); - const eth = new ethers.providers.JsonRpcProvider(ethRpcUrl); + const eth = new ethers.JsonRpcProvider(ethRpcUrl); fundedWallet = await FundedWallet.create(mainNode, eth); }); @@ -138,6 +139,7 @@ describe('snapshot recovery', () => { const l1BatchNumber = Math.max(...newBatchNumbers); snapshotMetadata = await getSnapshot(l1BatchNumber); console.log('Obtained latest snapshot', snapshotMetadata); + expect(snapshotMetadata.version).to.be.oneOf([0, 1]); const l2BlockNumber = snapshotMetadata.miniblockNumber; const protoPath = path.join(homeDir, 'core/lib/types/src/proto/mod.proto'); @@ -160,17 +162,20 @@ describe('snapshot recovery', () => { } sampledCount++; - const snapshotAccountAddress = '0x' + storageLog.accountAddress.toString('hex'); - const snapshotKey = '0x' + storageLog.storageKey.toString('hex'); - const snapshotValue = '0x' + storageLog.storageValue.toString('hex'); const snapshotL1BatchNumber = storageLog.l1BatchNumberOfInitialWrite; - const valueOnBlockchain = await mainNode.getStorageAt( - snapshotAccountAddress, - snapshotKey, - l2BlockNumber - ); - expect(snapshotValue).to.equal(valueOnBlockchain); expect(snapshotL1BatchNumber).to.be.lessThanOrEqual(l1BatchNumber); + + if (snapshotMetadata.version === 0) { + const snapshotAccountAddress = '0x' + storageLog.accountAddress.toString('hex'); + const snapshotKey = '0x' + storageLog.storageKey.toString('hex'); + const snapshotValue = '0x' + storageLog.storageValue.toString('hex'); + const valueOnBlockchain = await mainNode.getStorage( + snapshotAccountAddress, + snapshotKey, + l2BlockNumber + ); + expect(snapshotValue).to.equal(valueOnBlockchain); + } } console.log(`Checked random ${sampledCount} logs in the chunk`); } diff --git a/core/tests/recovery-test/tsconfig.json b/core/tests/recovery-test/tsconfig.json index 6c8907a86016..3de8e1a1c606 100644 --- a/core/tests/recovery-test/tsconfig.json +++ b/core/tests/recovery-test/tsconfig.json @@ -1,6 +1,6 @@ { "compilerOptions": { - "target": "es2019", + "target": "es2020", "module": "commonjs", "strict": true, "esModuleInterop": true, diff --git a/core/tests/revert-test/package.json b/core/tests/revert-test/package.json index f9b9fef68f2b..c3be63dff631 100644 --- a/core/tests/revert-test/package.json +++ b/core/tests/revert-test/package.json @@ -24,11 +24,12 @@ "@types/node-fetch": "^2.5.7", "chai": "^4.3.4", "ethereumjs-abi": "^0.6.8", - "ethers": "~5.7.0", + "ethers": "^6.7.1", "mocha": "^9.0.2", "mocha-steps": "^1.3.0", "node-fetch": "^2.6.1", "ts-node": "^10.1.0", - "typescript": "^4.3.5" + "typescript": "^4.3.5", + 
"zksync-ethers": "^6.9.0" } } diff --git a/core/tests/revert-test/tests/revert-and-restart-en.test.ts b/core/tests/revert-test/tests/revert-and-restart-en.test.ts index 6edf40a8d2d4..2fee9c7be887 100644 --- a/core/tests/revert-test/tests/revert-and-restart-en.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart-en.test.ts @@ -5,29 +5,55 @@ // main_contract.getTotalBatchesExecuted actually checks the number of batches executed. import * as utils from 'utils'; import { Tester } from './tester'; +import { exec, runServerInBackground, runExternalNodeInBackground } from './utils'; import * as zksync from 'zksync-ethers'; -import { BigNumber, ethers } from 'ethers'; +import * as ethers from 'ethers'; import { expect, assert } from 'chai'; import fs from 'fs'; import * as child_process from 'child_process'; import * as dotenv from 'dotenv'; +import { + getAllConfigsPath, + loadConfig, + shouldLoadConfigFromFile, + replaceAggregatedBlockExecuteDeadline +} from 'utils/build/file-configs'; +import path from 'path'; + +const pathToHome = path.join(__dirname, '../../../..'); +const fileConfig = shouldLoadConfigFromFile(); let mainEnv: string; let extEnv: string; -if (process.env.DEPLOYMENT_MODE == 'Validium') { + +let deploymentMode: string; + +if (fileConfig.loadFromFile) { + const genesisConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'genesis.yaml' }); + deploymentMode = genesisConfig.deploymentMode; +} else { + if (!process.env.DEPLOYMENT_MODE) { + throw new Error('DEPLOYMENT_MODE is not set'); + } + if (!['Validium', 'Rollup'].includes(process.env.DEPLOYMENT_MODE)) { + throw new Error(`Unknown deployment mode: ${process.env.DEPLOYMENT_MODE}`); + } + deploymentMode = process.env.DEPLOYMENT_MODE; +} + +if (deploymentMode == 'Validium') { mainEnv = process.env.IN_DOCKER ? 'dev_validium_docker' : 'dev_validium'; extEnv = process.env.IN_DOCKER ? 'ext-node-validium-docker' : 'ext-node-validium'; -} else if (process.env.DEPLOYMENT_MODE == 'Rollup') { +} else { + // Rollup deployment mode mainEnv = process.env.IN_DOCKER ? 'docker' : 'dev'; extEnv = process.env.IN_DOCKER ? 
'ext-node-docker' : 'ext-node'; -} else { - throw new Error(`Unknown deployment mode: ${process.env.DEPLOYMENT_MODE}`); } const mainLogsPath: string = 'revert_main.log'; const extLogsPath: string = 'revert_ext.log'; interface SuggestedValues { - lastExecutedL1BatchNumber: BigNumber; + lastExecutedL1BatchNumber: bigint; nonce: number; priorityFee: number; } @@ -40,16 +66,12 @@ function parseSuggestedValues(jsonString: string): SuggestedValues { assert(Number.isInteger(json.nonce)); assert(Number.isInteger(json.priority_fee)); return { - lastExecutedL1BatchNumber: BigNumber.from(json.last_executed_l1_batch_number), + lastExecutedL1BatchNumber: BigInt(json.last_executed_l1_batch_number), nonce: json.nonce, priorityFee: json.priority_fee }; } -function spawn(cmd: string, args: string[], options: child_process.SpawnOptions): child_process.ChildProcess { - return child_process.spawn(cmd, args, options); -} - function run(cmd: string, args: string[], options: child_process.SpawnOptions): child_process.SpawnSyncReturns { let res = child_process.spawnSync(cmd, args, options); expect(res.error).to.be.undefined; @@ -79,18 +101,33 @@ function fetchEnv(zksyncEnv: string): any { return { ...process.env, ...dotenv.parse(res.stdout) }; } -function runBlockReverter(args: string[]): string { +async function runBlockReverter(args: string[]): Promise { let env = fetchEnv(mainEnv); - env.RUST_LOG = 'off'; - let res = run('./target/release/block_reverter', args, { + + let fileConfigFlags = ''; + if (fileConfig.loadFromFile) { + const configPaths = getAllConfigsPath({ pathToHome, chain: fileConfig.chain }); + fileConfigFlags = ` + --config-path=${configPaths['general.yaml']} + --contracts-config-path=${configPaths['contracts.yaml']} + --secrets-path=${configPaths['secrets.yaml']} + --wallets-path=${configPaths['wallets.yaml']} + --genesis-path=${configPaths['genesis.yaml']} + `; + } + + const cmd = `cd ${pathToHome} && RUST_LOG=off cargo run --bin block_reverter --release -- ${args.join( + ' ' + )} ${fileConfigFlags}`; + const executedProcess = await exec(cmd, { cwd: env.ZKSYNC_HOME, env: { ...env, PATH: process.env.PATH } }); - console.log(res.stderr.toString()); - return res.stdout.toString(); + + return executedProcess.stdout; } async function killServerAndWaitForShutdown(tester: Tester, server: string) { @@ -112,7 +149,7 @@ async function killServerAndWaitForShutdown(tester: Tester, server: string) { } class MainNode { - constructor(public tester: Tester, private proc: child_process.ChildProcess) {} + constructor(public tester: Tester) {} // Terminates all main node processes running. public static async terminateAll() { @@ -129,28 +166,35 @@ class MainNode { public static async spawn( logs: fs.WriteStream, enableConsensus: boolean, - enableExecute: boolean + enableExecute: boolean, + ethClientWeb3Url: string, + apiWeb3JsonRpcHttpUrl: string, + baseTokenAddress: string ): Promise { let env = fetchEnv(mainEnv); env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = enableExecute ? '1' : '10000'; // Set full mode for the Merkle tree as it is required to get blocks committed. env.DATABASE_MERKLE_TREE_MODE = 'full'; - console.log(`DATABASE_URL = ${env.DATABASE_URL}`); - let components = 'api,tree,eth,state_keeper,commitment_generator'; + if (fileConfig.loadFromFile) { + replaceAggregatedBlockExecuteDeadline(pathToHome, fileConfig, enableExecute ? 
1 : 10000); + } + + let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher'; if (enableConsensus) { components += ',consensus'; } - let proc = spawn('./target/release/zksync_server', ['--components', components], { - cwd: env.ZKSYNC_HOME, + + let proc = runServerInBackground({ + components: [components], stdio: [null, logs, logs], - env: { - ...env, - PATH: process.env.PATH - } + cwd: pathToHome, + env: env, + useZkInception: fileConfig.loadFromFile }); + // Wait until the main node starts responding. - let tester: Tester = await Tester.init(env.ETH_CLIENT_WEB3_URL, env.API_WEB3_JSON_RPC_HTTP_URL); + let tester: Tester = await Tester.init(ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress); while (true) { try { await tester.syncWallet.provider.getBlockNumber(); @@ -163,7 +207,7 @@ class MainNode { await utils.sleep(1); } } - return new MainNode(tester, proc); + return new MainNode(tester); } } @@ -181,23 +225,29 @@ class ExtNode { // Spawns an external node. // If enableConsensus is set, the node will use consensus P2P network to fetch blocks. - public static async spawn(logs: fs.WriteStream, enableConsensus: boolean): Promise { + public static async spawn( + logs: fs.WriteStream, + enableConsensus: boolean, + ethClientWeb3Url: string, + enEthClientUrl: string, + baseTokenAddress: string + ): Promise { let env = fetchEnv(extEnv); - console.log(`DATABASE_URL = ${env.DATABASE_URL}`); let args = []; if (enableConsensus) { args.push('--enable-consensus'); } - let proc = spawn('./target/release/zksync_external_node', args, { - cwd: env.ZKSYNC_HOME, + + // Run server in background. + let proc = runExternalNodeInBackground({ stdio: [null, logs, logs], - env: { - ...env, - PATH: process.env.PATH - } + cwd: pathToHome, + env: env, + useZkInception: fileConfig.loadFromFile }); + // Wait until the node starts responding. 
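Both `MainNode.spawn` and `ExtNode.spawn` above follow the same readiness pattern: start the process, then poll the RPC endpoint until it answers. A minimal standalone sketch of that loop (the helper name and poll interval are illustrative, not part of this patch):

```ts
import { Provider } from 'zksync-ethers';

// Illustrative readiness poll mirroring the `while (true) { getBlockNumber(); }`
// loops in MainNode.spawn / ExtNode.spawn above.
async function waitUntilNodeIsReady(l2RpcUrl: string, pollIntervalMs = 1000): Promise<void> {
    const provider = new Provider(l2RpcUrl);
    while (true) {
        try {
            await provider.getBlockNumber();
            return; // the node is serving RPC requests
        } catch {
            await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
        }
    }
}
```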
- let tester: Tester = await Tester.init(env.EN_ETH_CLIENT_URL, `http://127.0.0.1:${env.EN_HTTP_PORT}`); + let tester: Tester = await Tester.init(ethClientWeb3Url, enEthClientUrl, baseTokenAddress); while (true) { try { await tester.syncWallet.provider.getBlockNumber(); @@ -223,15 +273,53 @@ class ExtNode { } describe('Block reverting test', function () { - if (process.env.SKIP_COMPILATION !== 'true') { - compileBinaries(); - } - console.log(`PWD = ${process.env.PWD}`); - const mainLogs: fs.WriteStream = fs.createWriteStream(mainLogsPath, { flags: 'a' }); - const extLogs: fs.WriteStream = fs.createWriteStream(extLogsPath, { flags: 'a' }); - const enableConsensus = process.env.ENABLE_CONSENSUS === 'true'; - console.log(`enableConsensus = ${enableConsensus}`); - const depositAmount: BigNumber = ethers.utils.parseEther('0.001'); + let ethClientWeb3Url: string; + let apiWeb3JsonRpcHttpUrl: string; + let baseTokenAddress: string; + let enEthClientUrl: string; + let operatorAddress: string; + let mainLogs: fs.WriteStream; + let extLogs: fs.WriteStream; + let depositAmount: bigint; + let enableConsensus: boolean; + + before('initialize test', async () => { + if (fileConfig.loadFromFile) { + const secretsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'secrets.yaml' }); + const generalConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'general.yaml' }); + const contractsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'contracts.yaml' }); + const externalNodeConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'external_node.yaml' + }); + const walletsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'wallets.yaml' }); + + ethClientWeb3Url = secretsConfig.l1.l1_rpc_url; + apiWeb3JsonRpcHttpUrl = generalConfig.api.web3_json_rpc.http_url; + baseTokenAddress = contractsConfig.l1.base_token_addr; + enEthClientUrl = externalNodeConfig.main_node_url; + operatorAddress = walletsConfig.operator.address; + } else { + let env = fetchEnv(mainEnv); + ethClientWeb3Url = env.ETH_CLIENT_WEB3_URL; + apiWeb3JsonRpcHttpUrl = env.API_WEB3_JSON_RPC_HTTP_URL; + baseTokenAddress = env.CONTRACTS_BASE_TOKEN_ADDR; + enEthClientUrl = `http://127.0.0.1:${env.EN_HTTP_PORT}`; + // TODO use env variable for this? 
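The `before` hook that follows resolves every endpoint from one of two sources: file-based `*.yaml` configs when `fileConfig.loadFromFile` is set, or legacy environment variables otherwise. A hedged sketch of that dual-source lookup as a reusable helper (the helper itself is hypothetical; `loadConfig` and `shouldLoadConfigFromFile` are the repo utilities this patch imports):

```ts
import { loadConfig, shouldLoadConfigFromFile } from 'utils/build/file-configs';

// Illustrative only: pick a value from a file-based config when available,
// falling back to an environment variable, as the `before` hooks below do inline.
function resolveSetting(opts: {
    pathToHome: string;
    envVar: string; // e.g. 'ETH_CLIENT_WEB3_URL'
    config: string; // e.g. 'secrets.yaml'
    pick: (config: any) => string; // e.g. (c) => c.l1.l1_rpc_url
}): string {
    const fileConfig = shouldLoadConfigFromFile();
    if (fileConfig.loadFromFile) {
        const loaded = loadConfig({ pathToHome: opts.pathToHome, chain: fileConfig.chain, config: opts.config });
        return opts.pick(loaded);
    }
    const value = process.env[opts.envVar];
    if (!value) {
        throw new Error(`${opts.envVar} is not set`);
    }
    return value;
}
```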
+ operatorAddress = '0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7'; + } + + if (process.env.SKIP_COMPILATION !== 'true' && !fileConfig.loadFromFile) { + compileBinaries(); + } + console.log(`PWD = ${process.env.PWD}`); + mainLogs = fs.createWriteStream(mainLogsPath, { flags: 'a' }); + extLogs = fs.createWriteStream(extLogsPath, { flags: 'a' }); + enableConsensus = process.env.ENABLE_CONSENSUS === 'true'; + console.log(`enableConsensus = ${enableConsensus}`); + depositAmount = ethers.parseEther('0.001'); + }); step('run', async () => { console.log('Make sure that nodes are not running'); @@ -239,23 +327,30 @@ describe('Block reverting test', function () { await MainNode.terminateAll(); console.log('Start main node'); - let mainNode = await MainNode.spawn(mainLogs, enableConsensus, true); + let mainNode = await MainNode.spawn( + mainLogs, + enableConsensus, + true, + ethClientWeb3Url, + apiWeb3JsonRpcHttpUrl, + baseTokenAddress + ); console.log('Start ext node'); - let extNode = await ExtNode.spawn(extLogs, enableConsensus); + let extNode = await ExtNode.spawn(extLogs, enableConsensus, ethClientWeb3Url, enEthClientUrl, baseTokenAddress); await mainNode.tester.fundSyncWallet(); await extNode.tester.fundSyncWallet(); const main_contract = await mainNode.tester.syncWallet.getMainContract(); - const baseTokenAddress = await mainNode.tester.syncWallet.getBaseToken(); - const isETHBasedChain = baseTokenAddress == zksync.utils.ETH_ADDRESS_IN_CONTRACTS; + const baseToken = await mainNode.tester.syncWallet.getBaseToken(); + const isETHBasedChain = baseToken === zksync.utils.ETH_ADDRESS_IN_CONTRACTS; const alice: zksync.Wallet = extNode.tester.emptyWallet(); console.log( 'Finalize an L1 transaction to ensure at least 1 executed L1 batch and that all transactions are processed' ); const h: zksync.types.PriorityOpResponse = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : baseTokenAddress, + token: isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : baseToken, amount: depositAmount, to: alice.address, approveBaseERC20: true, @@ -265,15 +360,22 @@ describe('Block reverting test', function () { console.log('Restart the main node with L1 batch execution disabled.'); await killServerAndWaitForShutdown(mainNode.tester, 'zksync_server'); - mainNode = await MainNode.spawn(mainLogs, enableConsensus, false); + mainNode = await MainNode.spawn( + mainLogs, + enableConsensus, + false, + ethClientWeb3Url, + apiWeb3JsonRpcHttpUrl, + baseTokenAddress + ); console.log('Commit at least 2 L1 batches which are not executed'); - const lastExecuted: BigNumber = await main_contract.getTotalBatchesExecuted(); + const lastExecuted = await main_contract.getTotalBatchesExecuted(); // One is not enough to test the reversion of sk cache because // it gets updated with some batch logs only at the start of the next batch. - const initialL1BatchNumber = (await main_contract.getTotalBatchesCommitted()).toNumber(); + const initialL1BatchNumber = await main_contract.getTotalBatchesCommitted(); const firstDepositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : baseTokenAddress, + token: isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : baseToken, amount: depositAmount, to: alice.address, approveBaseERC20: true, @@ -286,42 +388,42 @@ describe('Block reverting test', function () { } const secondDepositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : baseTokenAddress, + token: isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : baseToken, amount: depositAmount, to: alice.address, approveBaseERC20: true, approveERC20: true }); await secondDepositHandle.wait(); - while ((await extNode.tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber + 1) { + while ((await extNode.tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber + 1n) { await utils.sleep(0.3); } + const alice2 = await alice.getBalance(); while (true) { - const lastCommitted: BigNumber = await main_contract.getTotalBatchesCommitted(); + const lastCommitted = await main_contract.getTotalBatchesCommitted(); console.log(`lastExecuted = ${lastExecuted}, lastCommitted = ${lastCommitted}`); - if (lastCommitted.sub(lastExecuted).gte(2)) { + if (lastCommitted - lastExecuted >= 2n) { + console.log('Terminate the main node'); + await killServerAndWaitForShutdown(mainNode.tester, 'zksync_server'); break; } await utils.sleep(0.3); } - const alice2 = await alice.getBalance(); - console.log('Terminate the main node'); - await killServerAndWaitForShutdown(mainNode.tester, 'zksync_server'); console.log('Ask block_reverter to suggest to which L1 batch we should revert'); - const values_json = runBlockReverter([ + const values_json = await runBlockReverter([ 'print-suggested-values', '--json', '--operator-address', - '0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7' + operatorAddress ]); console.log(`values = ${values_json}`); const values = parseSuggestedValues(values_json); - assert(lastExecuted.eq(values.lastExecutedL1BatchNumber)); + assert(lastExecuted === values.lastExecutedL1BatchNumber); console.log('Send reverting transaction to L1'); - runBlockReverter([ + await runBlockReverter([ 'send-eth-transaction', '--l1-batch-number', values.lastExecutedL1BatchNumber.toString(), @@ -334,10 +436,10 @@ describe('Block reverting test', function () { console.log('Check that batches are reverted on L1'); const lastCommitted2 = await main_contract.getTotalBatchesCommitted(); console.log(`lastCommitted = ${lastCommitted2}, want ${lastExecuted}`); - assert(lastCommitted2.eq(lastExecuted)); + assert(lastCommitted2 === lastExecuted); console.log('Rollback db'); - runBlockReverter([ + await runBlockReverter([ 'rollback-db', '--l1-batch-number', values.lastExecutedL1BatchNumber.toString(), @@ -347,17 +449,24 @@ describe('Block reverting test', function () { ]); console.log('Start main node.'); - mainNode = await MainNode.spawn(mainLogs, enableConsensus, true); + mainNode = await MainNode.spawn( + mainLogs, + enableConsensus, + true, + ethClientWeb3Url, + apiWeb3JsonRpcHttpUrl, + baseTokenAddress + ); console.log('Wait for the external node to detect reorg and terminate'); await extNode.waitForExit(); console.log('Restart external node and wait for it to revert.'); - extNode = await ExtNode.spawn(extLogs, enableConsensus); + extNode = await ExtNode.spawn(extLogs, enableConsensus, ethClientWeb3Url, enEthClientUrl, baseTokenAddress); console.log('Execute an L1 transaction'); const depositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : baseTokenAddress, + token: isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : baseToken, amount: depositAmount, to: alice.address, approveBaseERC20: true, @@ -389,23 +498,27 @@ describe('Block reverting test', function () { // The reverted transactions are expected to be reexecuted before the next transaction is applied. 
// Hence we compare the state against the alice2, rather than against alice3. - const alice4want = alice2.add(BigNumber.from(depositAmount)); + const alice4want = alice2 + depositAmount; const alice4 = await alice.getBalance(); console.log(`Alice's balance is ${alice4}, want ${alice4want}`); - assert(alice4.eq(alice4want)); + assert(alice4 === alice4want); console.log('Execute an L2 transaction'); - await checkedRandomTransfer(alice, BigNumber.from(1)); + await checkedRandomTransfer(alice, 1n); }); - after('Terminate nodes', async () => { + after('terminate nodes', async () => { await MainNode.terminateAll(); await ExtNode.terminateAll(); + + if (fileConfig.loadFromFile) { + replaceAggregatedBlockExecuteDeadline(pathToHome, fileConfig, 10); + } }); }); // Transfers amount from sender to a random wallet in an L2 transaction. -async function checkedRandomTransfer(sender: zksync.Wallet, amount: BigNumber) { +async function checkedRandomTransfer(sender: zksync.Wallet, amount: bigint) { const senderBalanceBefore = await sender.getBalance(); const receiver = zksync.Wallet.createRandom().connect(sender.provider); const transferHandle = await sender.sendTransaction({ to: receiver.address, value: amount, type: 0 }); @@ -418,11 +531,10 @@ async function checkedRandomTransfer(sender: zksync.Wallet, amount: BigNumber) { } while (txReceipt === null); const senderBalance = await sender.getBalance(); - const receiverBalance = await receiver.getBalance(); + const receiverBalance = await receiver.provider!.getBalance(receiver.address); - expect(receiverBalance.eq(amount), 'Failed updated the balance of the receiver').to.be.true; + expect(receiverBalance === amount, 'Failed updated the balance of the receiver').to.be.true; - const spentAmount = txReceipt.gasUsed.mul(transferHandle.gasPrice!).add(amount); - expect(senderBalance.add(spentAmount).gte(senderBalanceBefore), 'Failed to update the balance of the sender').to.be - .true; + const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! + amount; + expect(senderBalance + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender').to.be.true; } diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts index 92869ab45c8c..25ed90ea72eb 100644 --- a/core/tests/revert-test/tests/revert-and-restart.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart.test.ts @@ -1,12 +1,20 @@ import * as utils from 'utils'; +import { loadConfig, shouldLoadConfigFromFile, getAllConfigsPath } from 'utils/build/file-configs'; +import { runServerInBackground } from 'utils/build/server'; import { Tester } from './tester'; import * as zksync from 'zksync-ethers'; -import { BigNumber, Contract, ethers } from 'ethers'; +import * as ethers from 'ethers'; import { expect } from 'chai'; import fs from 'fs'; +import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain'; +import path from 'path'; // Parses output of "print-suggested-values" command of the revert block tool. 
-function parseSuggestedValues(suggestedValuesString: string) { +function parseSuggestedValues(suggestedValuesString: string): { + lastL1BatchNumber: bigint; + nonce: bigint; + priorityFee: bigint; +} { const json = JSON.parse(suggestedValuesString); if (!json || typeof json !== 'object') { throw new TypeError('suggested values are not an object'); @@ -25,7 +33,11 @@ function parseSuggestedValues(suggestedValuesString: string) { throw new TypeError('suggested `priorityFee` is not an integer'); } - return { lastL1BatchNumber, nonce, priorityFee }; + return { + lastL1BatchNumber: BigInt(lastL1BatchNumber), + nonce: BigInt(nonce), + priorityFee: BigInt(priorityFee) + }; } async function killServerAndWaitForShutdown(tester: Tester) { @@ -51,27 +63,67 @@ function ignoreError(_err: any, context?: string) { console.info(message); } -const depositAmount = ethers.utils.parseEther('0.001'); +const depositAmount = ethers.parseEther('0.001'); describe('Block reverting test', function () { let tester: Tester; let alice: zksync.Wallet; - let mainContract: Contract; - let blocksCommittedBeforeRevert: number; + let mainContract: IZkSyncHyperchain; + let blocksCommittedBeforeRevert: bigint; let logs: fs.WriteStream; - let operatorAddress = process.env.ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR; + let operatorAddress: string; + let ethClientWeb3Url: string; + let apiWeb3JsonRpcHttpUrl: string; - let enable_consensus = process.env.ENABLE_CONSENSUS == 'true'; - let components = 'api,tree,eth,state_keeper,commitment_generator'; - if (enable_consensus) { + const fileConfig = shouldLoadConfigFromFile(); + + const pathToHome = path.join(__dirname, '../../../..'); + + const enableConsensus = process.env.ENABLE_CONSENSUS == 'true'; + let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher'; + if (enableConsensus) { components += ',consensus'; } - before('create test wallet', async () => { - tester = await Tester.init( - process.env.ETH_CLIENT_WEB3_URL as string, - process.env.API_WEB3_JSON_RPC_HTTP_URL as string - ); + before('initialize test', async () => { + // Clone file configs if necessary + let baseTokenAddress: string; + + if (!fileConfig.loadFromFile) { + operatorAddress = process.env.ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR!; + ethClientWeb3Url = process.env.ETH_CLIENT_WEB3_URL!; + apiWeb3JsonRpcHttpUrl = process.env.API_WEB3_JSON_RPC_HTTP_URL!; + baseTokenAddress = process.env.CONTRACTS_BASE_TOKEN_ADDR!; + } else { + const generalConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'general.yaml' + }); + const secretsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'secrets.yaml' + }); + const walletsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'wallets.yaml' + }); + const contractsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'contracts.yaml' + }); + + operatorAddress = walletsConfig.operator.address; + ethClientWeb3Url = secretsConfig.l1.l1_rpc_url; + apiWeb3JsonRpcHttpUrl = generalConfig.api.web3_json_rpc.http_url; + baseTokenAddress = contractsConfig.l1.base_token_addr; + } + + // Create test wallets + tester = await Tester.init(ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress); alice = tester.emptyWallet(); logs = fs.createWriteStream('revert.log', { flags: 'a' }); }); @@ -80,14 +132,14 @@ describe('Block reverting test', function () { // Make sure server isn't running. 
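`parseSuggestedValues` above rejects non-integer fields and converts them to `bigint`, because under ethers v6 the contract getters (`getTotalBatchesCommitted` and friends) also return `bigint`, so comparisons become plain integer arithmetic. A self-contained sketch of that conversion and the comparison style it enables (the helper name is illustrative):

```ts
// Illustrative sketch: convert the reverter's JSON integers to bigint so they
// compare cleanly against ethers v6 contract return values.
function toBigIntStrict(value: unknown, name: string): bigint {
    if (!Number.isInteger(value)) {
        throw new TypeError(`suggested \`${name}\` is not an integer`);
    }
    return BigInt(value as number);
}

// Usage: bigint comparisons replace the old BigNumber `.eq()` / `.sub()` style.
const lastExecuted = toBigIntStrict(3, 'lastL1BatchNumber');
const lastCommitted = 5n;
console.log(lastCommitted - lastExecuted >= 2n); // true
```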
await killServerAndWaitForShutdown(tester).catch(ignoreError); - // Set 1000 seconds deadline for `ExecuteBlocks` operation. - process.env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = '1000'; - // Set full mode for the Merkle tree as it is required to get blocks committed. - process.env.DATABASE_MERKLE_TREE_MODE = 'full'; - // Run server in background. + runServerInBackground({ + components: [components], + stdio: [null, logs, logs], + cwd: pathToHome, + useZkInception: fileConfig.loadFromFile + }); - utils.background(`zk server --components ${components}`, [null, logs, logs]); // Server may need some time to recompile if it's a cold run, so wait for it. let iter = 0; while (iter < 30 && !mainContract) { @@ -133,19 +185,19 @@ describe('Block reverting test', function () { } const balance = await alice.getBalance(); - expect(balance.eq(depositAmount.mul(2)), 'Incorrect balance after deposits').to.be.true; + expect(balance === depositAmount * 2n, 'Incorrect balance after deposits').to.be.true; // Check L1 committed and executed blocks. let blocksCommitted = await mainContract.getTotalBatchesCommitted(); let blocksExecuted = await mainContract.getTotalBatchesExecuted(); let tryCount = 0; - while (blocksCommitted.eq(blocksExecuted) && tryCount < 100) { + while (blocksCommitted === blocksExecuted && tryCount < 100) { blocksCommitted = await mainContract.getTotalBatchesCommitted(); blocksExecuted = await mainContract.getTotalBatchesExecuted(); tryCount += 1; await utils.sleep(1); } - expect(blocksCommitted.gt(blocksExecuted), 'There is no committed but not executed block').to.be.true; + expect(blocksCommitted > blocksExecuted, 'There is no committed but not executed block').to.be.true; blocksCommittedBeforeRevert = blocksCommitted; // Stop server. 
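The step above waits until L1 has at least one committed-but-unexecuted batch by repeatedly polling the main contract. A sketch of that polling loop factored into a helper (the helper is illustrative; `IZkSyncHyperchain` and `utils.sleep` are the imports this patch already uses):

```ts
import * as utils from 'utils';
import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain';

// Sketch under stated assumptions: both getters return bigint under ethers v6.
async function waitForCommittedButUnexecutedBatch(
    mainContract: IZkSyncHyperchain,
    maxTries = 100
): Promise<bigint> {
    for (let i = 0; i < maxTries; i++) {
        const committed: bigint = await mainContract.getTotalBatchesCommitted();
        const executed: bigint = await mainContract.getTotalBatchesExecuted();
        if (committed > executed) {
            return committed; // at least one batch is left to revert/execute
        }
        await utils.sleep(1);
    }
    throw new Error('No committed-but-unexecuted batch appeared in time');
}
```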
@@ -153,9 +205,20 @@ describe('Block reverting test', function () { }); step('revert blocks', async () => { + let fileConfigFlags = ''; + if (fileConfig.loadFromFile) { + const configPaths = getAllConfigsPath({ pathToHome, chain: fileConfig.chain }); + fileConfigFlags = ` + --config-path=${configPaths['general.yaml']} + --contracts-config-path=${configPaths['contracts.yaml']} + --secrets-path=${configPaths['secrets.yaml']} + --wallets-path=${configPaths['wallets.yaml']} + --genesis-path=${configPaths['genesis.yaml']} + `; + } + const executedProcess = await utils.exec( - 'cd $ZKSYNC_HOME && ' + - `RUST_LOG=off cargo run --bin block_reverter --release -- print-suggested-values --json --operator-address ${operatorAddress}` + `cd ${pathToHome} && RUST_LOG=off cargo run --bin block_reverter --release -- print-suggested-values --json --operator-address ${operatorAddress} ${fileConfigFlags}` // ^ Switch off logs to not pollute the output JSON ); const suggestedValuesOutput = executedProcess.stdout; @@ -169,28 +232,30 @@ describe('Block reverting test', function () { console.log('Sending ETH transaction..'); await utils.spawn( - `cd $ZKSYNC_HOME && cargo run --bin block_reverter --release -- send-eth-transaction --l1-batch-number ${lastL1BatchNumber} --nonce ${nonce} --priority-fee-per-gas ${priorityFee}` + `cd ${pathToHome} && cargo run --bin block_reverter --release -- send-eth-transaction --l1-batch-number ${lastL1BatchNumber} --nonce ${nonce} --priority-fee-per-gas ${priorityFee} ${fileConfigFlags}` ); console.log('Rolling back DB..'); await utils.spawn( - `cd $ZKSYNC_HOME && cargo run --bin block_reverter --release -- rollback-db --l1-batch-number ${lastL1BatchNumber} --rollback-postgres --rollback-tree --rollback-sk-cache` + `cd ${pathToHome} && cargo run --bin block_reverter --release -- rollback-db --l1-batch-number ${lastL1BatchNumber} --rollback-postgres --rollback-tree --rollback-sk-cache ${fileConfigFlags}` ); let blocksCommitted = await mainContract.getTotalBatchesCommitted(); - expect(blocksCommitted.eq(lastL1BatchNumber), 'Revert on contract was unsuccessful').to.be.true; + expect(blocksCommitted === lastL1BatchNumber, 'Revert on contract was unsuccessful').to.be.true; }); step('execute transaction after revert', async () => { - // Set 1 second deadline for `ExecuteBlocks` operation. - process.env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = '1'; - // Run server. 
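The `fileConfigFlags` assembly above is repeated for every `block_reverter` invocation in these tests; the flag names come straight from this patch, while the helper wrapping them is hypothetical:

```ts
import { getAllConfigsPath } from 'utils/build/file-configs';

// Illustrative helper mirroring the inline `fileConfigFlags` blocks above.
function blockReverterConfigFlags(pathToHome: string, chain: string): string {
    const configPaths = getAllConfigsPath({ pathToHome, chain });
    return [
        `--config-path=${configPaths['general.yaml']}`,
        `--contracts-config-path=${configPaths['contracts.yaml']}`,
        `--secrets-path=${configPaths['secrets.yaml']}`,
        `--wallets-path=${configPaths['wallets.yaml']}`,
        `--genesis-path=${configPaths['genesis.yaml']}`
    ].join(' ');
}
```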
- utils.background(`zk server --components ${components}`, [null, logs, logs]); - await utils.sleep(10); + runServerInBackground({ + components: [components], + stdio: [null, logs, logs], + cwd: pathToHome, + useZkInception: fileConfig.loadFromFile + }); + await utils.sleep(30); const balanceBefore = await alice.getBalance(); - expect(balanceBefore.eq(depositAmount.mul(2)), 'Incorrect balance after revert').to.be.true; + expect(balanceBefore === depositAmount * 2n, 'Incorrect balance after revert').to.be.true; // Execute a transaction const depositHandle = await tester.syncWallet.deposit({ @@ -220,23 +285,27 @@ describe('Block reverting test', function () { expect(receipt.status).to.be.eql(1); const balanceAfter = await alice.getBalance(); - expect(balanceAfter.eq(BigNumber.from(depositAmount).mul(3)), 'Incorrect balance after another deposit').to.be - .true; + expect(balanceAfter === depositAmount * 3n, 'Incorrect balance after another deposit').to.be.true; }); step('execute transactions after simple restart', async () => { // Execute an L2 transaction - await checkedRandomTransfer(alice, BigNumber.from(1)); + await checkedRandomTransfer(alice, 1n); // Stop server. await killServerAndWaitForShutdown(tester); // Run again. - utils.background(`zk server --components=${components}`, [null, logs, logs]); - await utils.sleep(10); + runServerInBackground({ + components: [components], + stdio: [null, logs, logs], + cwd: pathToHome, + useZkInception: fileConfig.loadFromFile + }); + await utils.sleep(30); // Trying to send a transaction from the same address again - await checkedRandomTransfer(alice, BigNumber.from(1)); + await checkedRandomTransfer(alice, 1n); }); after('Try killing server', async () => { @@ -244,9 +313,10 @@ describe('Block reverting test', function () { }); }); -async function checkedRandomTransfer(sender: zksync.Wallet, amount: BigNumber) { +async function checkedRandomTransfer(sender: zksync.Wallet, amount: bigint) { const senderBalanceBefore = await sender.getBalance(); - const receiver = zksync.Wallet.createRandom().connect(sender.provider); + const receiverHD = zksync.Wallet.createRandom(); + const receiver = new zksync.Wallet(receiverHD.privateKey, sender.provider); const transferHandle = await sender.sendTransaction({ to: receiver.address, value: amount, @@ -263,9 +333,8 @@ async function checkedRandomTransfer(sender: zksync.Wallet, amount: BigNumber) { const senderBalance = await sender.getBalance(); const receiverBalance = await receiver.getBalance(); - expect(receiverBalance.eq(amount), 'Failed updated the balance of the receiver').to.be.true; + expect(receiverBalance === amount, 'Failed updated the balance of the receiver').to.be.true; - const spentAmount = txReceipt.gasUsed.mul(transferHandle.gasPrice!).add(amount); - expect(senderBalance.add(spentAmount).gte(senderBalanceBefore), 'Failed to update the balance of the sender').to.be - .true; + const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! 
+ amount; + expect(senderBalance + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender').to.be.true; } diff --git a/core/tests/revert-test/tests/tester.ts b/core/tests/revert-test/tests/tester.ts index f50ffbcb7094..faf7f0949232 100644 --- a/core/tests/revert-test/tests/tester.ts +++ b/core/tests/revert-test/tests/tester.ts @@ -4,12 +4,12 @@ import * as zksync from 'zksync-ethers'; import * as fs from 'fs'; import * as path from 'path'; -const BASE_ERC20_TO_MINT = ethers.utils.parseEther('100'); +const BASE_ERC20_TO_MINT = ethers.parseEther('100'); export class Tester { - public runningFee: Map<zksync.types.Address, ethers.BigNumber>; + public runningFee: Map<zksync.types.Address, bigint>; constructor( - public ethProvider: ethers.providers.Provider, + public ethProvider: ethers.Provider, public ethWallet: ethers.Wallet, public syncWallet: zksync.Wallet, public web3Provider: zksync.Provider, @@ -21,20 +21,22 @@ } // prettier-ignore - static async init(l1_rpc_addr: string, l2_rpc_addr: string) : Promise<Tester> { - const ethProvider = new ethers.providers.JsonRpcProvider(l1_rpc_addr); + static async init(l1_rpc_addr: string, l2_rpc_addr: string, baseTokenAddress: string) : Promise<Tester> { + const ethProvider = new ethers.JsonRpcProvider(l1_rpc_addr); ethProvider.pollingInterval = 100; const testConfigPath = path.join(process.env.ZKSYNC_HOME!, `etc/test_config/constant`); const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); - let ethWallet = ethers.Wallet.fromMnemonic( - ethTestConfig.test_mnemonic as string, + const ethWalletHD = ethers.HDNodeWallet.fromMnemonic( + ethers.Mnemonic.fromPhrase(ethTestConfig.test_mnemonic), "m/44'/60'/0'/0/0" - ).connect(ethProvider); - let hyperchainAdmin = ethers.Wallet.fromMnemonic( - ethTestConfig.mnemonic as string, + ); + const ethWallet = new ethers.Wallet(ethWalletHD.privateKey, ethProvider); + const hyperchainAdminHD = ethers.HDNodeWallet.fromMnemonic( + ethers.Mnemonic.fromPhrase(ethTestConfig.mnemonic), "m/44'/60'/0'/0/1" - ).connect(ethProvider); + ); + const hyperchainAdmin = new ethers.Wallet(hyperchainAdminHD.privateKey, ethProvider); const web3Provider = new zksync.Provider(l2_rpc_addr); web3Provider.pollingInterval = 100; // It's OK to keep it low even on stage. const syncWallet = new zksync.Wallet(ethWallet.privateKey, web3Provider, ethProvider); @@ -42,16 +44,16 @@ // Since some tx may be pending on stage, we don't want to get stuck because of it. // In order to not get stuck transactions, we manually cancel all the pending txs. - const latestNonce = await ethWallet.getTransactionCount('latest'); - const pendingNonce = await ethWallet.getTransactionCount('pending'); + const latestNonce = await ethWallet.getNonce('latest'); + const pendingNonce = await ethWallet.getNonce('pending'); const cancellationTxs = []; for (let nonce = latestNonce; nonce != pendingNonce; nonce++) { - // For each transaction to override it, we need to provide greater fee. + // For each transaction to override it, we need to provide greater fee. // We would manually provide a value high enough (for a testnet) to be both valid // and higher than the previous one. It's OK as we'll only be charged for the base fee // anyways. We will also set the miner's tip to 5 gwei, which is also much higher than the normal one.
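The comment above describes the cancellation trick: re-send an empty transaction to yourself for every pending nonce with a fee high enough to replace the stuck one. A sketch of the same pattern in ethers v6 (the helper is illustrative; `parseUnits` is used here instead of the patch's `parseEther` spellings of 250 and 5 gwei):

```ts
import * as ethers from 'ethers';

// Illustrative: replace every pending transaction by a self-send with a much
// higher fee, as the Tester.init code below does inline.
async function cancelPendingTxs(wallet: ethers.Wallet): Promise<void> {
    const latestNonce = await wallet.getNonce('latest');
    const pendingNonce = await wallet.getNonce('pending');
    const maxFeePerGas = ethers.parseUnits('250', 'gwei');
    const maxPriorityFeePerGas = ethers.parseUnits('5', 'gwei');
    const cancellations = [];
    for (let nonce = latestNonce; nonce < pendingNonce; nonce++) {
        cancellations.push(
            wallet
                .sendTransaction({ to: wallet.address, nonce, maxFeePerGas, maxPriorityFeePerGas })
                .then((tx) => tx.wait())
        );
    }
    await Promise.all(cancellations);
}
```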
- const maxFeePerGas = ethers.utils.parseEther("0.00000025"); // 250 gwei - const maxPriorityFeePerGas = ethers.utils.parseEther("0.000000005"); // 5 gwei + const maxFeePerGas = ethers.parseEther("0.00000025"); // 250 gwei + const maxPriorityFeePerGas = ethers.parseEther("0.000000005"); // 5 gwei cancellationTxs.push(ethWallet.sendTransaction({ to: ethWallet.address, nonce, maxFeePerGas, maxPriorityFeePerGas }).then((tx) => tx.wait())); } if (cancellationTxs.length > 0) { @@ -59,7 +61,6 @@ export class Tester { console.log(`Canceled ${cancellationTxs.length} pending transactions`); } - const baseTokenAddress = process.env.CONTRACTS_BASE_TOKEN_ADDR!; const isETHBasedChain = baseTokenAddress == zksync.utils.ETH_ADDRESS_IN_CONTRACTS; return new Tester(ethProvider, ethWallet, syncWallet, web3Provider, hyperchainAdmin, isETHBasedChain, baseTokenAddress); @@ -77,15 +78,12 @@ export class Tester { } } - async fundedWallet( - ethAmount: ethers.BigNumberish, - l1Token: zksync.types.Address, - tokenAmount: ethers.BigNumberish - ) { - const newWallet = zksync.Wallet.createRandom().connect(this.web3Provider).connectToL1(this.ethProvider); + async fundedWallet(ethAmount: bigint, l1Token: zksync.types.Address, tokenAmount: bigint) { + const newWalletHD = zksync.Wallet.createRandom(); + const newWallet = new zksync.Wallet(newWalletHD.privateKey, this.web3Provider, this.ethProvider); let ethBalance = await this.syncWallet.getBalanceL1(); - expect(ethBalance.gt(ethAmount), 'Insufficient eth balance to create funded wallet').to.be.true; + expect(ethBalance > ethAmount, 'Insufficient eth balance to create funded wallet').to.be.true; // To make the wallet capable of requesting priority operations, // send ETH to L1. @@ -99,7 +97,7 @@ export class Tester { // Funds the wallet with L1 token. 
let tokenBalance = await this.syncWallet.getBalanceL1(l1Token); - expect(tokenBalance.gt(tokenAmount), 'Insufficient token balance to create funded wallet').to.be.true; + expect(tokenBalance > tokenAmount, 'Insufficient token balance to create funded wallet').to.be.true; const erc20ABI = ['function transfer(address to, uint256 amount)']; const erc20Contract = new ethers.Contract(l1Token, erc20ABI, this.ethWallet); @@ -111,6 +109,7 @@ } emptyWallet() { - return zksync.Wallet.createRandom().connect(this.web3Provider).connectToL1(this.ethProvider); + const walletHD = zksync.Wallet.createRandom(); + return new zksync.Wallet(walletHD.privateKey, this.web3Provider, this.ethProvider); } } diff --git a/core/tests/revert-test/tests/utils.ts b/core/tests/revert-test/tests/utils.ts new file mode 100644 index 000000000000..4bf38387cccf --- /dev/null +++ b/core/tests/revert-test/tests/utils.ts @@ -0,0 +1,81 @@ +import { exec as _exec, spawn as _spawn, ChildProcessWithoutNullStreams, type ProcessEnvOptions } from 'child_process'; +import { promisify } from 'util'; + +// executes a command in background and returns a child process handle +// by default pipes data to parent's stdio but this can be overridden +export function background({ + command, + stdio = 'inherit', + cwd, + env +}: { + command: string; + stdio: any; + cwd?: ProcessEnvOptions['cwd']; + env?: ProcessEnvOptions['env']; +}): ChildProcessWithoutNullStreams { + command = command.replace(/\n/g, ' '); + return _spawn(command, { stdio: stdio, shell: true, detached: true, cwd, env }); +} + +export function runInBackground({ + command, + components, + stdio, + cwd, + env +}: { + command: string; + components?: string[]; + stdio: any; + cwd?: Parameters<typeof background>[0]['cwd']; + env?: Parameters<typeof background>[0]['env']; +}): ChildProcessWithoutNullStreams { + if (components && components.length > 0) { + command += ` --components=${components.join(',')}`; + } + return background({ command, stdio, cwd, env }); +} + +export function runServerInBackground({ + components, + stdio, + cwd, + env, + useZkInception +}: { + components?: string[]; + stdio: any; + cwd?: Parameters<typeof background>[0]['cwd']; + env?: Parameters<typeof background>[0]['env']; + useZkInception?: boolean; +}): ChildProcessWithoutNullStreams { + let command = useZkInception ? 'zk_inception server' : 'zk server'; + return runInBackground({ command, components, stdio, cwd, env }); +} + +export function runExternalNodeInBackground({ + components, + stdio, + cwd, + env, + useZkInception +}: { + components?: string[]; + stdio: any; + cwd?: Parameters<typeof background>[0]['cwd']; + env?: Parameters<typeof background>[0]['env']; + useZkInception?: boolean; +}): ChildProcessWithoutNullStreams { + let command = useZkInception ?
'zk_inception external-node run' : 'zk external-node'; + return runInBackground({ command, components, stdio, cwd, env }); +} + +// async executor of shell commands +// spawns a new shell and can execute arbitrary commands, like "ls -la | grep .env" +// returns { stdout, stderr } +const promisified = promisify(_exec); +export function exec(command: string, options: ProcessEnvOptions) { + command = command.replace(/\n/g, ' '); + return promisified(command, options); +} diff --git a/core/tests/revert-test/tsconfig.json b/core/tests/revert-test/tsconfig.json index 6c8907a86016..3de8e1a1c606 100644 --- a/core/tests/revert-test/tsconfig.json +++ b/core/tests/revert-test/tsconfig.json @@ -1,6 +1,6 @@ { "compilerOptions": { - "target": "es2019", + "target": "es2020", "module": "commonjs", "strict": true, "esModuleInterop": true, diff --git a/core/tests/test_account/Cargo.toml b/core/tests/test_account/Cargo.toml index 6df10edd7dca..0dda4f8ac777 100644 --- a/core/tests/test_account/Cargo.toml +++ b/core/tests/test_account/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "zksync_test_account" -version = "0.1.0" +description = "ZKsync test account for writing unit tests" +version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true @@ -8,7 +9,6 @@ repository.workspace = true license.workspace = true keywords.workspace = true categories.workspace = true -publish = false [dependencies] zksync_types.workspace = true diff --git a/core/tests/ts-integration/contracts/state-override/StateOverrideTest.sol b/core/tests/ts-integration/contracts/state-override/StateOverrideTest.sol new file mode 100644 index 000000000000..e8d02737cc15 --- /dev/null +++ b/core/tests/ts-integration/contracts/state-override/StateOverrideTest.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract StateOverrideTest { + uint256 public someValue; + uint256 public anotherValue; + uint256 public initialValue = 100; + + function setValue(uint256 value) public { + someValue = value; + } + + function setAnotherValue(uint256 value) public { + anotherValue = value; + } + + function increment(uint256 value) public view returns (uint256) { + require(someValue > 0, "Initial state not set"); + return someValue + value; + } + + function sumValues() public view returns (uint256) { + require(someValue > 0 && anotherValue > 0, "Initial state not set"); + return someValue + anotherValue + initialValue; + } +} diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index 1741f2b20557..03bd84bb3f48 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -13,16 +13,16 @@ "build-yul": "hardhat run scripts/compile-yul.ts" }, "devDependencies": { - "@matterlabs/hardhat-zksync-deploy": "^0.6.5", - "@matterlabs/hardhat-zksync-solc": "0.4.2", - "@matterlabs/hardhat-zksync-vyper": "^1.0.0", - "@nomiclabs/hardhat-vyper": "^3.0.5", + "@matterlabs/hardhat-zksync-deploy": "^1.3.0", + "@matterlabs/hardhat-zksync-solc": "^1.1.4", + "@matterlabs/hardhat-zksync-vyper": "^1.0.8", + "@nomiclabs/hardhat-vyper": "^3.0.6", "@types/jest": "^29.0.3", "@types/node": "^18.19.15", "@types/node-fetch": "^2.5.7", "chalk": "^4.0.0", "ethereumjs-abi": "^0.6.8", - "ethers": "~5.7.0", + "ethers": "^6.7.1", "hardhat": "=2.22.2", "jest": "^29.0.3", "jest-matcher-utils": "^29.0.3", @@ -30,7 +30,7 @@ "ts-jest": "^29.0.1", "ts-node": "^10.1.0", "typescript": "^4.3.5", - "zksync-ethers": "5.8.0-beta.5", +
"zksync-ethers": "^6.9.0", "elliptic": "^6.5.5", "yaml": "^2.4.2" } diff --git a/core/tests/ts-integration/src/context-owner.ts b/core/tests/ts-integration/src/context-owner.ts index f6f0ebfc8e99..634e8c950a69 100644 --- a/core/tests/ts-integration/src/context-owner.ts +++ b/core/tests/ts-integration/src/context-owner.ts @@ -15,14 +15,14 @@ import { RetryProvider } from './retry-provider'; // // Please DO NOT change these constants if you don't know why you have to do that. Try to debug the particular issue // you face first. -export const L1_DEFAULT_ETH_PER_ACCOUNT = ethers.utils.parseEther('0.08'); +export const L1_DEFAULT_ETH_PER_ACCOUNT = ethers.parseEther('0.08'); // Stress tests for L1->L2 transactions on localhost require a lot of upfront payment, but these are skipped during tests on normal environments -export const L1_EXTENDED_TESTS_ETH_PER_ACCOUNT = ethers.utils.parseEther('0.5'); -export const L2_DEFAULT_ETH_PER_ACCOUNT = ethers.utils.parseEther('0.5'); +export const L1_EXTENDED_TESTS_ETH_PER_ACCOUNT = ethers.parseEther('0.5'); +export const L2_DEFAULT_ETH_PER_ACCOUNT = ethers.parseEther('0.5'); // Stress tests on local host may require a lot of additiomal funds, but these are skipped during tests on normal environments -export const L2_EXTENDED_TESTS_ETH_PER_ACCOUNT = ethers.utils.parseEther('50'); -export const ERC20_PER_ACCOUNT = ethers.utils.parseEther('10000.0'); +export const L2_EXTENDED_TESTS_ETH_PER_ACCOUNT = ethers.parseEther('50'); +export const ERC20_PER_ACCOUNT = ethers.parseEther('10000.0'); /** * This class is responsible for preparing the test environment for all the other test suites. @@ -56,7 +56,7 @@ export class TestContextOwner { private mainEthersWallet: ethers.Wallet; private mainSyncWallet: zksync.Wallet; - private l1Provider: ethers.providers.JsonRpcProvider; + private l1Provider: ethers.JsonRpcProvider; private l2Provider: zksync.Provider; private reporter: Reporter = new Reporter(); @@ -67,7 +67,7 @@ export class TestContextOwner { this.reporter.message('Using L1 provider: ' + env.l1NodeUrl); this.reporter.message('Using L2 provider: ' + env.l2NodeUrl); - this.l1Provider = new ethers.providers.JsonRpcProvider(env.l1NodeUrl); + this.l1Provider = new ethers.JsonRpcProvider(env.l1NodeUrl); this.l2Provider = new RetryProvider( { url: env.l2NodeUrl, @@ -132,16 +132,16 @@ export class TestContextOwner { // Since some tx may be pending on stage, we don't want to get stuck because of it. // In order to not get stuck transactions, we manually cancel all the pending txs. const ethWallet = this.mainEthersWallet; - const latestNonce = await ethWallet.getTransactionCount('latest'); - const pendingNonce = await ethWallet.getTransactionCount('pending'); + const latestNonce = await ethWallet.getNonce('latest'); + const pendingNonce = await ethWallet.getNonce('pending'); this.reporter.debug(`Latest nonce is ${latestNonce}, pending nonce is ${pendingNonce}`); // For each transaction to override it, we need to provide greater fee. // We would manually provide a value high enough (for a testnet) to be both valid // and higher than the previous one. It's OK as we'll only be charged for the base fee // anyways. We will also set the miner's tip to 5 gwei, which is also much higher than the normal one. // Scaled gas price to be used to prevent transactions from being stuck. 
- const maxPriorityFeePerGas = ethers.utils.parseEther('0.000000005'); // 5 gwei - const maxFeePerGas = ethers.utils.parseEther('0.00000025'); // 250 gwei + const maxPriorityFeePerGas = ethers.parseEther('0.000000005'); // 5 gwei + const maxFeePerGas = ethers.parseEther('0.00000025'); // 250 gwei this.reporter.debug(`Max nonce is ${latestNonce}, pending nonce is ${pendingNonce}`); const cancellationTxs = []; @@ -202,12 +202,13 @@ this.reporter.message(`Found following suites: ${suites.join(', ')}`); // + 1 for the main account (it has to send all these transactions). - const accountsAmount = suites.length + 1; + const accountsAmount = BigInt(suites.length) + 1n; const l2ETHAmountToDeposit = await this.ensureBalances(accountsAmount); - const l2ERC20AmountToDeposit = ERC20_PER_ACCOUNT.mul(accountsAmount); + const l2ERC20AmountToDeposit = ERC20_PER_ACCOUNT * accountsAmount; const wallets = this.createTestWallets(suites); - const baseTokenAddress = await this.mainSyncWallet.provider.getBaseTokenContractAddress(); + const bridgehubContract = await this.mainSyncWallet.getBridgehubContract(); + const baseTokenAddress = await bridgehubContract.baseToken(this.env.l2ChainId); await this.distributeL1BaseToken(wallets, l2ERC20AmountToDeposit, baseTokenAddress); await this.cancelAllowances(); await this.distributeL1Tokens(wallets, l2ETHAmountToDeposit, l2ERC20AmountToDeposit, baseTokenAddress); @@ -220,27 +221,26 @@ /** * Checks the operator account balances on L1 and L2 and deposits funds if required. */ - private async ensureBalances(accountsAmount: number): Promise<ethers.BigNumber> { + private async ensureBalances(accountsAmount: bigint): Promise<bigint> { this.reporter.startAction(`Checking main account balance`); this.reporter.message(`Operator address is ${this.mainEthersWallet.address}`); - const requiredL2ETHAmount = this.requiredL2ETHPerAccount().mul(accountsAmount); + const requiredL2ETHAmount = this.requiredL2ETHPerAccount() * accountsAmount; const actualL2ETHAmount = await this.mainSyncWallet.getBalance(); - this.reporter.message(`Operator balance on L2 is ${ethers.utils.formatEther(actualL2ETHAmount)} ETH`); + this.reporter.message(`Operator balance on L2 is ${ethers.formatEther(actualL2ETHAmount)} ETH`); // We may have enough funds in L2. If that's the case, no need to deposit more than required. - const l2ETHAmountToDeposit = requiredL2ETHAmount.gt(actualL2ETHAmount) - ? requiredL2ETHAmount.sub(actualL2ETHAmount) - : ethers.BigNumber.from(0); + const l2ETHAmountToDeposit = + requiredL2ETHAmount > actualL2ETHAmount ?
requiredL2ETHAmount - actualL2ETHAmount : 0n; - const requiredL1ETHAmount = this.requiredL1ETHPerAccount().mul(accountsAmount).add(l2ETHAmountToDeposit); + const requiredL1ETHAmount = this.requiredL1ETHPerAccount() * accountsAmount + l2ETHAmountToDeposit; const actualL1ETHAmount = await this.mainSyncWallet.getBalanceL1(); - this.reporter.message(`Operator balance on L1 is ${ethers.utils.formatEther(actualL1ETHAmount)} ETH`); + this.reporter.message(`Operator balance on L1 is ${ethers.formatEther(actualL1ETHAmount)} ETH`); - if (requiredL1ETHAmount.gt(actualL1ETHAmount)) { - const required = ethers.utils.formatEther(requiredL1ETHAmount); - const actual = ethers.utils.formatEther(actualL1ETHAmount); + if (requiredL1ETHAmount > actualL1ETHAmount) { + const required = ethers.formatEther(requiredL1ETHAmount); + const actual = ethers.formatEther(actualL1ETHAmount); const errorMessage = `There must be at least ${required} ETH on main account, but only ${actual} is available`; throw new Error(errorMessage); } @@ -270,17 +270,15 @@ */ private async distributeL1BaseToken( wallets: TestWallets, - l2erc20DepositAmount: ethers.BigNumber, + l2erc20DepositAmount: bigint, baseTokenAddress: zksync.types.Address ) { + this.reporter.debug(`Base token address is ${baseTokenAddress}`); + const ethIsBaseToken = baseTokenAddress == zksync.utils.ETH_ADDRESS_IN_CONTRACTS; this.reporter.startAction(`Distributing base tokens on L1`); - if (baseTokenAddress != zksync.utils.ETH_ADDRESS_IN_CONTRACTS) { - const chainId = this.env.l2ChainId; - const l1startNonce = await this.mainEthersWallet.getTransactionCount(); + if (!ethIsBaseToken) { + const l1startNonce = await this.mainEthersWallet.getNonce(); this.reporter.debug(`Start nonce is ${l1startNonce}`); - const ethIsBaseToken = - (await (await this.mainSyncWallet.getBridgehubContract()).baseToken(chainId)) == - zksync.utils.ETH_ADDRESS_IN_CONTRACTS; // All the promises we send in this function. const l1TxPromises: Promise<any>[] = []; // Mutable nonce to send the transactions before actually `await`ing them. @@ -289,7 +287,7 @@ const gasPrice = await scaledGasPrice(this.mainEthersWallet); // Define values for handling ERC20 transfers/deposits. - const baseMintAmount = l2erc20DepositAmount.mul(1000); + const baseMintAmount = l2erc20DepositAmount * 1000n; // Mint ERC20. const l1Erc20ABI = ['function mint(address to, uint256 amount)']; const l1Erc20Contract = new ethers.Contract(baseTokenAddress, l1Erc20ABI, this.mainEthersWallet); @@ -302,12 +300,12 @@ this.reporter.debug(`Sent ERC20 mint transaction. Hash: ${tx.hash}, tx nonce ${tx.nonce}`); return tx.wait(); }); - l1TxPromises.push(baseMintPromise); + this.reporter.debug(`Nonce changed by 1 for ERC20 mint, new nonce: ${nonce}`); + await baseMintPromise; // Deposit base token if needed - let baseDepositPromise; const baseIsTransferred = true; - baseDepositPromise = this.mainSyncWallet + const baseDepositPromise = this.mainSyncWallet .deposit({ token: baseTokenAddress, amount: l2erc20DepositAmount, @@ -329,25 +327,27 @@ .then((tx) => { // Note: there is an `approve` tx, not listed here. this.reporter.debug(`Sent ERC20 deposit transaction. Hash: ${tx.hash}, tx nonce: ${tx.nonce}`); - tx.wait(); - - nonce = nonce + 1 + (ethIsBaseToken ? 0 : 1) + (baseIsTransferred ? 0 : 1); - - if (!ethIsBaseToken) { - // Send base token on L1.
- const baseTokenTransfers = sendTransfers( - baseTokenAddress, - this.mainEthersWallet, - wallets, - ERC20_PER_ACCOUNT, - nonce, - gasPrice, - this.reporter - ); - return baseTokenTransfers.then((promises) => Promise.all(promises)); - } + return tx.wait(); }); + nonce = nonce + 1 + (ethIsBaseToken ? 0 : 1) + (baseIsTransferred ? 0 : 1); + this.reporter.debug( + `Nonce changed by ${ + 1 + (ethIsBaseToken ? 0 : 1) + (baseIsTransferred ? 0 : 1) + } for ERC20 deposit, new nonce: ${nonce}` + ); + // Send base token on L1. + const baseTokenTransfers = await sendTransfers( + baseTokenAddress, + this.mainEthersWallet, + wallets, + ERC20_PER_ACCOUNT, + nonce, + gasPrice, + this.reporter + ); + l1TxPromises.push(baseDepositPromise); + l1TxPromises.push(...baseTokenTransfers); this.reporter.debug(`Sent ${l1TxPromises.length} base token initial transactions on L1`); await Promise.all(l1TxPromises); @@ -361,17 +361,14 @@ */ private async distributeL1Tokens( wallets: TestWallets, - l2ETHAmountToDeposit: ethers.BigNumber, - l2erc20DepositAmount: ethers.BigNumber, + l2ETHAmountToDeposit: bigint, + l2erc20DepositAmount: bigint, baseTokenAddress: zksync.types.Address ) { - const chainId = this.env.l2ChainId; + const ethIsBaseToken = baseTokenAddress == zksync.utils.ETH_ADDRESS_IN_CONTRACTS; this.reporter.startAction(`Distributing tokens on L1`); - const l1startNonce = await this.mainEthersWallet.getTransactionCount(); + const l1startNonce = await this.mainEthersWallet.getNonce(); this.reporter.debug(`Start nonce is ${l1startNonce}`); - const ethIsBaseToken = - (await (await this.mainSyncWallet.getBridgehubContract()).baseToken(chainId)) == - zksync.utils.ETH_ADDRESS_IN_CONTRACTS; // All the promises we send in this function. const l1TxPromises: Promise<any>[] = []; // Mutable nonce to send the transactions before actually `await`ing them. @@ -380,7 +377,7 @@ const gasPrice = await scaledGasPrice(this.mainEthersWallet); // Deposit L2 tokens (if needed). - if (!l2ETHAmountToDeposit.isZero()) { + if (l2ETHAmountToDeposit != 0n) { // Given that we've already sent a number of transactions, // we have to correctly send nonce. const depositHandle = this.mainSyncWallet @@ -401,7 +398,7 @@ l2GasLimit: 1000000 }) .then((tx) => { - const amount = ethers.utils.formatEther(l2ETHAmountToDeposit); + const amount = ethers.formatEther(l2ETHAmountToDeposit); this.reporter.debug(`Sent ETH deposit. Nonce ${tx.nonce}, amount: ${amount}, hash: ${tx.hash}`); tx.wait(); }); @@ -409,21 +406,21 @@ this.reporter.debug( `Nonce changed by ${1 + (ethIsBaseToken ? 0 : 1)} for ETH deposit, new nonce: ${nonce}` ); - // Add this promise to the list of L1 tx promises. - // l1TxPromises.push(depositHandle); await depositHandle; } // Define values for handling ERC20 transfers/deposits. const erc20Token = this.env.erc20Token.l1Address; - const erc20MintAmount = l2erc20DepositAmount.mul(100); + const erc20MintAmount = l2erc20DepositAmount * 100n; // Mint ERC20.
const baseIsTransferred = false; // we are not transferring the base const l1Erc20ABI = ['function mint(address to, uint256 amount)']; const l1Erc20Contract = new ethers.Contract(erc20Token, l1Erc20ABI, this.mainEthersWallet); + const gasLimit = await l1Erc20Contract.mint.estimateGas(this.mainSyncWallet.address, erc20MintAmount); const erc20MintPromise = l1Erc20Contract .mint(this.mainSyncWallet.address, erc20MintAmount, { nonce: nonce++, - gasPrice + gasPrice, + gasLimit }) .then((tx: any) => { this.reporter.debug(`Sent ERC20 mint transaction. Hash: ${tx.hash}, nonce ${tx.nonce}`); @@ -513,7 +510,7 @@ */ private async distributeL2Tokens(wallets: TestWallets) { this.reporter.startAction(`Distributing tokens on L2`); - let l2startNonce = await this.mainSyncWallet.getTransactionCount(); + let l2startNonce = await this.mainSyncWallet.getNonce(); // ETH transfers. const l2TxPromises = await sendTransfers( @@ -606,16 +603,16 @@ export async function sendTransfers( token: string, wallet: ethers.Wallet | zksync.Wallet, wallets: TestWallets, - value: ethers.BigNumber, + value: bigint, overrideStartNonce?: number, - gasPrice?: ethers.BigNumber, + gasPrice?: bigint, reporter?: Reporter ): Promise<Promise<any>[]> { const erc20Contract = wallet instanceof zksync.Wallet ? new zksync.Contract(token, zksync.utils.IERC20, wallet) : new ethers.Contract(token, zksync.utils.IERC20, wallet); - const startNonce = overrideStartNonce ?? (await wallet.getTransactionCount()); + const startNonce = overrideStartNonce ?? (await wallet.getNonce()); reporter?.debug(`Sending transfers. Token address is ${token}`); const walletsPK = Array.from(Object.values(wallets)); @@ -626,7 +623,7 @@ const testWalletPK = walletsPK[index]; if (token == zksync.utils.ETH_ADDRESS) { const tx = { - to: ethers.utils.computeAddress(testWalletPK), + to: ethers.computeAddress(testWalletPK), value, nonce: startNonce + index, gasPrice @@ -638,23 +635,25 @@ txPromises.push( transactionResponse.wait().then((tx) => { - reporter?.debug(`Obtained receipt for ETH transfer tx: ${tx.transactionHash} `); + reporter?.debug(`Obtained receipt for ETH transfer tx: ${tx?.hash} `); return tx; }) ); } else { const txNonce = startNonce + index; reporter?.debug(`Initiated ERC20 transfer with nonce: ${txNonce}`); - const tx = await erc20Contract.transfer(ethers.utils.computeAddress(testWalletPK), value, { + const gasLimit = await erc20Contract.transfer.estimateGas(ethers.computeAddress(testWalletPK), value); + const tx = await erc20Contract.transfer(ethers.computeAddress(testWalletPK), value, { nonce: txNonce, - gasPrice + gasPrice, + gasLimit }); reporter?.debug(`Sent ERC20 transfer tx: ${tx.hash}, nonce: ${tx.nonce}`); txPromises.push( // @ts-ignore tx.wait().then((tx) => { - reporter?.debug(`Obtained receipt for ERC20 transfer tx: ${tx.transactionHash}`); + reporter?.debug(`Obtained receipt for ERC20 transfer tx: ${tx.hash}`); return tx; }) ); @@ -694,21 +693,21 @@ } // We use scaled gas price to increase chances of tx not being stuck. const gasPrice = await scaledGasPrice(from); - const transferPrice = gasLimit.mul(gasPrice); + const transferPrice = gasLimit * gasPrice; - const balance = await from.getBalance(); + const balance = await from.provider!.getBalance(from.address); // If we can't afford sending funds back (or the wallet is empty), do nothing.
- if (transferPrice.gt(balance)) { + if (transferPrice > balance) { continue; } - const value = balance.sub(transferPrice); + const value = balance - transferPrice; reporter?.debug( - `Wallet balance: ${ethers.utils.formatEther(balance)} ETH,\ - estimated cost is ${ethers.utils.formatEther(transferPrice)} ETH,\ - value for tx is ${ethers.utils.formatEther(value)} ETH` + `Wallet balance: ${ethers.formatEther(balance)} ETH,\ + estimated cost is ${ethers.formatEther(transferPrice)} ETH,\ + value for tx is ${ethers.formatEther(value)} ETH` ); const txPromise = from @@ -736,4 +735,4 @@ /** * Type represents a transaction that may have been sent. */ -type ReceiptFuture = Promise<ethers.providers.TransactionReceipt>; +type ReceiptFuture = Promise<ethers.TransactionReceipt>; diff --git a/core/tests/ts-integration/src/env.ts b/core/tests/ts-integration/src/env.ts index c440e6b08ea6..cb2638929d05 100644 --- a/core/tests/ts-integration/src/env.ts +++ b/core/tests/ts-integration/src/env.ts @@ -6,6 +6,7 @@ import { DataAvailabityMode, NodeMode, TestEnvironment } from './types'; import { Reporter } from './reporter'; import * as yaml from 'yaml'; import { L2_BASE_TOKEN_ADDRESS } from 'zksync-ethers/build/utils'; +import { loadConfig, loadEcosystem, shouldLoadConfigFromFile } from 'utils/build/file-configs'; /** * Attempts to connect to server. @@ -46,7 +47,7 @@ function getMainWalletPk(pathToHome: string, network: string): string { if (network.toLowerCase() == 'localhost') { const testConfigPath = path.join(pathToHome, `etc/test_config/constant`); const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); - return ethers.Wallet.fromMnemonic(ethTestConfig.test_mnemonic as string, "m/44'/60'/0'/0/0").privateKey; + return ethers.Wallet.fromPhrase(ethTestConfig.test_mnemonic).privateKey; } else { return ensureVariable(process.env.MASTER_WALLET_PK, 'Main wallet private key'); } @@ -57,11 +58,19 @@ */ async function loadTestEnvironmentFromFile(chain: string): Promise<TestEnvironment> { const pathToHome = path.join(__dirname, '../../../..'); + let nodeMode; + if (process.env.EXTERNAL_NODE == 'true') { + nodeMode = NodeMode.External; + } else { + nodeMode = NodeMode.Main; + } let ecosystem = loadEcosystem(pathToHome); + // Genesis file is common for both EN and Main node + let genesisConfig = loadConfig({ pathToHome, chain, config: 'genesis.yaml' }); - let generalConfig = loadConfig(pathToHome, chain, 'general.yaml'); - let genesisConfig = loadConfig(pathToHome, chain, 'genesis.yaml'); - let secretsConfig = loadConfig(pathToHome, chain, 'secrets.yaml'); + let configsFolderSuffix = nodeMode == NodeMode.External ?
'external_node' : undefined; + let generalConfig = loadConfig({ pathToHome, chain, config: 'general.yaml', configsFolderSuffix }); + let secretsConfig = loadConfig({ pathToHome, chain, config: 'secrets.yaml', configsFolderSuffix }); const network = ecosystem.l1_network; let mainWalletPK = getMainWalletPk(pathToHome, network); @@ -112,15 +121,13 @@ export async function loadTestEnvironment(): Promise<TestEnvironment> { - let chain = process.env.CHAIN_NAME; + const { loadFromFile, chain } = shouldLoadConfigFromFile(); - if (chain) { + if (loadFromFile) { return await loadTestEnvironmentFromFile(chain); } return await loadTestEnvironmentFromEnv(); @@ -225,16 +232,16 @@ export async function loadTestEnvironmentFromEnv(): Promise<TestEnvironment> { ).l2TokenAddress(weth.address); const baseTokenAddressL2 = L2_BASE_TOKEN_ADDRESS; - const l2ChainId = parseInt(process.env.CHAIN_ETH_ZKSYNC_NETWORK_ID!); + const l2ChainId = BigInt(process.env.CHAIN_ETH_ZKSYNC_NETWORK_ID!); // If the `CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE` is not set, the default value is `Rollup`. const l1BatchCommitDataGeneratorMode = (process.env.CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE || process.env.EN_L1_BATCH_COMMIT_DATA_GENERATOR_MODE || 'Rollup') as DataAvailabityMode; let minimalL2GasPrice; if (process.env.CHAIN_STATE_KEEPER_MINIMAL_L2_GAS_PRICE !== undefined) { - minimalL2GasPrice = ethers.BigNumber.from(process.env.CHAIN_STATE_KEEPER_MINIMAL_L2_GAS_PRICE!); + minimalL2GasPrice = BigInt(process.env.CHAIN_STATE_KEEPER_MINIMAL_L2_GAS_PRICE!); } else { - minimalL2GasPrice = ethers.BigNumber.from(0); + minimalL2GasPrice = 0n; } let nodeMode; if (process.env.EN_MAIN_NODE_URL !== undefined) { @@ -246,7 +253,7 @@ export async function loadTestEnvironmentFromEnv(): Promise<TestEnvironment> { const validationComputationalGasLimit = parseInt( process.env.CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT! ); - const priorityTxMaxGasLimit = parseInt(process.env.CONTRACTS_PRIORITY_TX_MAX_GAS_LIMIT!); + const priorityTxMaxGasLimit = BigInt(process.env.CONTRACTS_PRIORITY_TX_MAX_GAS_LIMIT!); const maxLogsLimit = parseInt( process.env.EN_REQ_ENTITIES_LIMIT ?? process.env.API_WEB3_JSON_RPC_REQ_ENTITIES_LIMIT! ); @@ -311,7 +318,7 @@ type Tokens = { type L1Token = { name: string; symbol: string; - decimals: number; + decimals: bigint; address: string; }; @@ -320,11 +327,13 @@ function getTokens(pathToHome: string, network: string): L1Token[] { if (!fs.existsSync(configPath)) { return []; } - return JSON.parse( + const parsed = JSON.parse( fs.readFileSync(configPath, { encoding: 'utf-8' - }) + }), + (key, value) => (key === 'decimals' ?
BigInt(value) : value) ); + return parsed; } function getTokensNew(pathToHome: string): Tokens { @@ -333,7 +342,7 @@ throw Error('Tokens config not found'); } - return yaml.parse( + const parsedObject = yaml.parse( fs.readFileSync(configPath, { encoding: 'utf-8' }), @@ -341,30 +350,11 @@ customTags } ); -} - -function loadEcosystem(pathToHome: string): any { - const configPath = path.join(pathToHome, '/ZkStack.yaml'); - if (!fs.existsSync(configPath)) { - return []; - } - return yaml.parse( - fs.readFileSync(configPath, { - encoding: 'utf-8' - }) - ); -} -function loadConfig(pathToHome: string, chainName: string, config: string): any { - const configPath = path.join(pathToHome, `/chains/${chainName}/configs/${config}`); - if (!fs.existsSync(configPath)) { - return []; + for (const key in parsedObject.tokens) { + parsedObject.tokens[key].decimals = BigInt(parsedObject.tokens[key].decimals); } - return yaml.parse( - fs.readFileSync(configPath, { - encoding: 'utf-8' - }) - ); + return parsedObject; } function customTags(tags: yaml.Tags): yaml.Tags { diff --git a/core/tests/ts-integration/src/helpers.ts b/core/tests/ts-integration/src/helpers.ts index 7848749bfe31..8e31c1a691ff 100644 --- a/core/tests/ts-integration/src/helpers.ts +++ b/core/tests/ts-integration/src/helpers.ts @@ -47,8 +47,8 @@ export async function deployContract( overrides: any = {} ): Promise<zksync.Contract> { const contractFactory = new zksync.ContractFactory(artifact.abi, artifact.bytecode, initiator, deploymentType); - const contract = await contractFactory.deploy(...args, overrides); - await contract.deployed(); + const contract = (await contractFactory.deploy(...args, overrides)) as zksync.Contract; + await contract.waitForDeployment(); return contract; } @@ -59,7 +59,7 @@ * @param wallet Wallet to send a transaction from. Should have enough balance to cover the fee. * @returns Transaction receipt. */ -export async function anyTransaction(wallet: zksync.Wallet): Promise<ethers.providers.TransactionReceipt> { +export async function anyTransaction(wallet: zksync.Wallet): Promise<zksync.types.TransactionReceipt> { return await wallet.transfer({ to: wallet.address, amount: 0 }).then((tx) => tx.wait()); } @@ -74,10 +74,10 @@ export async function waitForNewL1Batch(wallet: zksync.Wallet): Promise<zksync.types.TransactionReceipt> { -export async function scaledGasPrice(wallet: zksync.Wallet): Promise<ethers.BigNumber> { - const gasPrice = await wallet.getGasPrice(); +export async function scaledGasPrice(wallet: ethers.Wallet | zksync.Wallet): Promise<bigint> { + const provider = wallet.provider; + if (!provider) { + throw new Error('Wallet should have provider'); + } + const feeData = await provider.getFeeData(); + const gasPrice = feeData.gasPrice; + if (!gasPrice) { + throw new Error('Failed to fetch gas price'); + } // Increase by 40% - return gasPrice.mul(140).div(100); + return (gasPrice * 140n) / 100n; +} + +export const bigIntReviver = (_: string, value: any) => { + if (typeof value === 'string' && value.endsWith('n')) { + const number = value.slice(0, -1); + if (/^-?\d+$/.test(number)) { + return BigInt(number); + } + } + return value; +}; + +export const bigIntReplacer = (_: string, value: any) => { + if (typeof value === 'bigint') { + return `${value}n`; + } + return value; +}; + +export function bigIntMax(...args: bigint[]) { + if (args.length === 0) { + throw new Error('No arguments provided'); + } + + return args.reduce((max, current) => (current > max ?
current : max), args[0]); } diff --git a/core/tests/ts-integration/src/jest-setup/add-matchers.ts b/core/tests/ts-integration/src/jest-setup/add-matchers.ts index e673e7a909d1..f3e10bab07af 100644 --- a/core/tests/ts-integration/src/jest-setup/add-matchers.ts +++ b/core/tests/ts-integration/src/jest-setup/add-matchers.ts @@ -1,9 +1,7 @@ -import * as bigNumberMatchers from '../matchers/big-number'; import * as ethPrimitives from '../matchers/eth-primitives'; import * as transaction from '../matchers/transaction'; import * as fail from '../matchers/fail'; -expect.extend(bigNumberMatchers); expect.extend(ethPrimitives); expect.extend(transaction); expect.extend(fail); diff --git a/core/tests/ts-integration/src/jest-setup/global-setup.ts b/core/tests/ts-integration/src/jest-setup/global-setup.ts index f86961eb1dc1..d84d70fe69da 100644 --- a/core/tests/ts-integration/src/jest-setup/global-setup.ts +++ b/core/tests/ts-integration/src/jest-setup/global-setup.ts @@ -1,3 +1,4 @@ +import { bigIntReplacer } from '../helpers'; import { TestContextOwner, loadTestEnvironment } from '../index'; declare global { @@ -26,7 +27,7 @@ async function performSetup(_globalConfig: any, _projectConfig: any) { // Set the test context for test suites to pick up. // Currently, jest doesn't provide a way to pass data from `globalSetup` to suites, // so we store the data as serialized JSON. - process.env.ZKSYNC_JEST_TEST_CONTEXT = JSON.stringify(testContext); + process.env.ZKSYNC_JEST_TEST_CONTEXT = JSON.stringify(testContext, bigIntReplacer); // Store the context object for teardown script, so it can perform, well, the teardown. globalThis.__ZKSYNC_TEST_CONTEXT_OWNER__ = testContextOwner; diff --git a/core/tests/ts-integration/src/matchers/big-number.ts b/core/tests/ts-integration/src/matchers/big-number.ts deleted file mode 100644 index df93ad1c71af..000000000000 --- a/core/tests/ts-integration/src/matchers/big-number.ts +++ /dev/null @@ -1,100 +0,0 @@ -import { BigNumber, BigNumberish } from 'ethers'; -import { TestMessage } from './matcher-helpers'; - -// Note: I attempted to "overload" the existing matchers from Jest (like `toBeGreaterThan`), -// but failed. There is a proposed hack in one GitHub issue from 2018: if you'll be trying to -// do the same, know: this hack doesn't work anymore. Default matchers rely on `this` to have -// certain properties, so attempt to load default matchers from `build` directory and call them -// as a fallback won't work (or I failed to make it work). - -// This file contains implementation of matchers for BigNumber objects. -// For actual doc-comments, see `typings/jest.d.ts` file. 
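Note: the file deletion just below is a direct consequence of the move to native `bigint` — Jest's built-in comparison matchers accept bigints directly, so the custom `bnToBe*` family has no remaining purpose. A minimal sketch of the replacement idiom; the values are illustrative, not taken from the suite:

```ts
// Jest compares native bigint out of the box; no expect.extend(...) required.
const balance = 1_000_000_000n; // illustrative value
const fee = 250n; // illustrative value

expect(balance).toBeGreaterThan(fee); // was: expect(balance).bnToBeGt(fee)
expect(balance).toBeGreaterThanOrEqual(fee); // was: bnToBeGte
expect(fee).toBeLessThan(balance); // was: bnToBeLt
expect(balance - fee).toEqual(999_999_750n); // was: bnToBeEq
```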
- -// Matcher for `l.gt(r)` -export function bnToBeGt(l: BigNumberish, r: BigNumberish, additionalInfo?: string) { - const comparator = (l: BigNumber, r: BigNumber) => l.gt(r); - const matcherName = `bnToBeGt`; - const matcherMessage = `greater than`; - return matcherBody(l, r, comparator, matcherName, matcherMessage, additionalInfo); -} - -// Matcher for `l.gte(r)` -export function bnToBeGte(l: BigNumberish, r: BigNumberish, additionalInfo?: string) { - const comparator = (l: BigNumber, r: BigNumber) => l.gte(r); - const matcherName = `bnToBeGte`; - const matcherMessage = `greater or equal than`; - return matcherBody(l, r, comparator, matcherName, matcherMessage, additionalInfo); -} - -// Matcher for `l.eq(r)` -export function bnToBeEq(l: BigNumberish, r: BigNumberish, additionalInfo?: string) { - const comparator = (l: BigNumber, r: BigNumber) => l.eq(r); - const matcherName = `bnToBeEq`; - const matcherMessage = `equal to`; - return matcherBody(l, r, comparator, matcherName, matcherMessage, additionalInfo); -} - -// Matcher for `l.lt(r)` -export function bnToBeLt(l: BigNumberish, r: BigNumberish, additionalInfo?: string) { - const comparator = (l: BigNumber, r: BigNumber) => l.lt(r); - const matcherName = `bnToBeLt`; - const matcherMessage = `less than`; - return matcherBody(l, r, comparator, matcherName, matcherMessage, additionalInfo); -} - -// Matcher for `l.lte(r)` -export function bnToBeLte(l: BigNumberish, r: BigNumberish, additionalInfo?: string) { - const comparator = (l: BigNumber, r: BigNumber) => l.lte(r); - const matcherName = `bnToBeLte`; - const matcherMessage = `less than or equal`; - return matcherBody(l, r, comparator, matcherName, matcherMessage, additionalInfo); -} - -/** - * Generic body of the BigNumber matchers. Use to reduce the amount of boilerplate code. - * - * @param l Initial number (from `expect(l)`). - * @param r Number to compare to (from `.bnToBeXXX(r)`). - * @param comparator Comparator function to invoke to see if test passes (e.g. `(l, r) => l.gt(r)`). - * @param matcherName Name of the matcher function (e.g. `bnToBeGt`). - * @param matcherMessage Generic part of the failure message (e.g. `greater than`). - * @param additionalInfo Message provided by user to be included in case of failure. - * @returns Object expected by jest matcher. - */ -function matcherBody( - l: BigNumberish, - r: BigNumberish, - comparator: (l: BigNumber, r: BigNumber) => boolean, - matcherName: string, - matcherMessage: string, - additionalInfo?: string -) { - // Numbers are provided as `BigNumberish`, so they can be strings or numbers. - const left = BigNumber.from(l); - const right = BigNumber.from(r); - const pass = comparator(left, right); - - // Declare messages for normal case and case where matcher was preceded by `.not`. - let passMessage = new TestMessage() - .matcherHint(`.not.${matcherName}`) - .line('Expected the following number:') - .received(left) - .line(`to not be ${matcherMessage}:`) - .expected(right) - .additional(additionalInfo) - .build(); - - let failMessage = new TestMessage() - .matcherHint(`.${matcherName}`) - .line('Expected the following number:') - .received(left) - .line(`to be ${matcherMessage}:`) - .expected(right) - .additional(additionalInfo) - .build(); - - return { - pass, - message: () => (pass ? 
passMessage : failMessage) - }; -} diff --git a/core/tests/ts-integration/src/matchers/eth-primitives.ts b/core/tests/ts-integration/src/matchers/eth-primitives.ts index 509b4aa51d26..87347e1e1228 100644 --- a/core/tests/ts-integration/src/matchers/eth-primitives.ts +++ b/core/tests/ts-integration/src/matchers/eth-primitives.ts @@ -5,7 +5,7 @@ import { TestMessage } from './matcher-helpers'; // For actual doc-comments, see `typings/jest.d.ts` file. export function toBeAddress(value: string, additionalInfo?: string) { - const pass = ethers.utils.isAddress(value); + const pass = ethers.isAddress(value); // Declare messages for normal case and case where matcher was preceded by `.not`. let passMessage = new TestMessage() @@ -29,7 +29,7 @@ } export function toBeHexString(value: string, additionalInfo?: string) { - const pass = ethers.utils.isHexString(value); + const pass = ethers.isHexString(value); // Declare messages for normal case and case where matcher was preceded by `.not`. let passMessage = new TestMessage() diff --git a/core/tests/ts-integration/src/matchers/transaction.ts b/core/tests/ts-integration/src/matchers/transaction.ts index 4058d28321a3..89e90b6d5f16 100644 --- a/core/tests/ts-integration/src/matchers/transaction.ts +++ b/core/tests/ts-integration/src/matchers/transaction.ts @@ -219,7 +219,8 @@ function checkReceiptFields(request: zksync.types.TransactionResponse, receipt: if (receipt.status !== 0 && receipt.status !== 1) { return failWith(`Status field in the receipt has an unexpected value (expected 0 or 1): ${receipt.status}`); } - if (!receipt.effectiveGasPrice) { + const effectiveGasPrice = receipt.gasUsed * receipt.gasPrice; + if (effectiveGasPrice <= 0n) { return failWith(`Effective gas price expected to be greater than 0`); } if (!receipt.gasUsed) { diff --git a/core/tests/ts-integration/src/modifiers/balance-checker.ts b/core/tests/ts-integration/src/modifiers/balance-checker.ts index aeb60aaf4ab1..bdf04db05982 100644 --- a/core/tests/ts-integration/src/modifiers/balance-checker.ts +++ b/core/tests/ts-integration/src/modifiers/balance-checker.ts @@ -7,7 +7,7 @@ import * as ethers from 'ethers'; import { TestMessage } from '../matchers/matcher-helpers'; import { MatcherModifier, MatcherMessage } from '.'; import { Fee } from '../types'; -import { Ierc20Factory as IERC20Factory } from 'zksync-ethers/build/typechain/Ierc20Factory'; +import { IERC20__factory as IERC20Factory } from 'zksync-ethers/build/typechain'; /** * Modifier that ensures that fee was taken from the wallet for a transaction. @@ -19,7 +19,7 @@ import { Ierc20Factory as IERC20Factory } from 'zksync-ethers/build/typechain/Ie * @returns Matcher object */ export async function shouldOnlyTakeFee(wallet: zksync.Wallet, isL1ToL2?: boolean): Promise<MatcherModifier> { - return await ShouldChangeBalance.create(zksync.utils.ETH_ADDRESS, [{ wallet, change: 0 }], { l1ToL2: isL1ToL2 }); + return await ShouldChangeBalance.create(zksync.utils.ETH_ADDRESS, [{ wallet, change: 0n }], { l1ToL2: isL1ToL2 }); } /** @@ -69,7 +69,7 @@ export async function shouldChangeTokenBalances( */ export interface BalanceChange { wallet: zksync.Wallet; - change: ethers.BigNumberish; + change: bigint; addressToCheck?: string; } @@ -87,7 +87,7 @@ export interface Params { * *before* the transaction was sent.
*/ interface PopulatedBalanceChange extends BalanceChange { - initialBalance: ethers.BigNumber; + initialBalance: bigint; } /** @@ -156,20 +156,19 @@ class ShouldChangeBalance extends MatcherModifier { // To "ignore" subtracted fee, we just add it back to the account balance. // For L1->L2 transactions the sender might be different from the refund recipient if (this.l1ToL2) { - newBalance = newBalance.sub(extractRefundForL1ToL2(receipt, address)); + newBalance = newBalance - extractRefundForL1ToL2(receipt, address); } else if (address == receipt.from) { - newBalance = newBalance.add(extractFee(receipt).feeAfterRefund); + newBalance = newBalance + extractFee(receipt).feeAfterRefund; } } - const diff = newBalance.sub(prevBalance); - const change = ethers.BigNumber.from(balanceChange.change); - if (!diff.eq(change)) { + const diff = newBalance - prevBalance; + if (diff != balanceChange.change) { const message = new TestMessage() .matcherHint(`ShouldChangeBalance modifier`) .line(`Incorrect balance change for wallet ${balanceChange.wallet.address} (index ${id} in array)`) .line(`Expected balance change to be:`) - .expected(change) + .expected(balanceChange.change) .line(`But actual change is:`) .received(diff) .line(`Balance before: ${prevBalance}, balance after: ${newBalance}`) @@ -201,7 +200,7 @@ export function extractFee(receipt: zksync.types.TransactionReceipt, from?: stri const systemAccountAddress = '0x0000000000000000000000000000000000000000000000000000000000008001'; // We need to pad address to represent 256-bit value. - const fromAccountAddress = ethers.utils.hexZeroPad(ethers.utils.arrayify(from), 32); + const fromAccountAddress = ethers.zeroPadValue(ethers.getBytes(from), 32); // Fee log is one that sends money to the system contract account. const feeLog = receipt.logs.find((log) => { return log.topics.length == 3 && log.topics[1] == fromAccountAddress && log.topics[2] == systemAccountAddress; @@ -213,7 +212,7 @@ export function extractFee(receipt: zksync.types.TransactionReceipt, from?: stri }; } - const feeAmount = ethers.BigNumber.from(feeLog.data); + const feeAmount = BigInt(feeLog.data); // There may be more than one refund log for the user const feeRefund = receipt.logs @@ -222,14 +221,14 @@ export function extractFee(receipt: zksync.types.TransactionReceipt, from?: stri log.topics.length == 3 && log.topics[1] == systemAccountAddress && log.topics[2] == fromAccountAddress ); }) - .map((log) => ethers.BigNumber.from(log.data)) + .map((log) => BigInt(log.data)) .reduce((prev, cur) => { - return prev.add(cur); - }, ethers.BigNumber.from(0)); + return prev + cur; + }, 0n); return { feeBeforeRefund: feeAmount, - feeAfterRefund: feeAmount.sub(feeRefund), + feeAfterRefund: feeAmount - feeRefund, refund: feeRefund }; } @@ -241,10 +240,10 @@ export function extractFee(receipt: zksync.types.TransactionReceipt, from?: stri * @param from Optional substitute to `receipt.from`. * @returns Extracted fee */ -function extractRefundForL1ToL2(receipt: zksync.types.TransactionReceipt, refundRecipient?: string): ethers.BigNumber { +function extractRefundForL1ToL2(receipt: zksync.types.TransactionReceipt, refundRecipient?: string): bigint { refundRecipient = refundRecipient ?? 
receipt.from; - const mintTopic = ethers.utils.keccak256(ethers.utils.toUtf8Bytes('Mint(address,uint256)')); + const mintTopic = ethers.keccak256(ethers.toUtf8Bytes('Mint(address,uint256)')); const refundLogs = receipt.logs.filter((log) => { return log.topics.length == 2 && log.topics[0] == mintTopic; @@ -262,7 +261,7 @@ function extractRefundForL1ToL2(receipt: zksync.types.TransactionReceipt, refund // final refund. const refundLog = refundLogs[refundLogs.length - 1]; - const formattedRefundRecipient = ethers.utils.hexlify(ethers.utils.zeroPad(refundRecipient, 32)); + const formattedRefundRecipient = ethers.hexlify(ethers.zeroPadValue(refundRecipient, 32)); if (refundLog.topics[1].toLowerCase() !== formattedRefundRecipient.toLowerCase()) { throw { @@ -271,7 +270,7 @@ }; } - return ethers.BigNumber.from(refundLog.data); + return BigInt(refundLog.data); } /** @@ -283,12 +282,7 @@ function extractRefundForL1ToL2(receipt: zksync.types.TransactionReceipt, refund * @param token Address of the token * @returns Token balance */ -async function getBalance( - l1: boolean, - wallet: zksync.Wallet, - address: string, - token: string -): Promise<ethers.BigNumber> { +async function getBalance(l1: boolean, wallet: zksync.Wallet, address: string, token: string): Promise<bigint> { const provider = l1 ? wallet.providerL1! : wallet.provider; if (zksync.utils.isETH(token)) { return await provider.getBalance(address); diff --git a/core/tests/ts-integration/src/retry-provider.ts b/core/tests/ts-integration/src/retry-provider.ts index 924af720cab2..1763c0e4edf5 100644 --- a/core/tests/ts-integration/src/retry-provider.ts +++ b/core/tests/ts-integration/src/retry-provider.ts @@ -1,7 +1,6 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { Reporter } from './reporter'; -import { TransactionResponse } from 'zksync-ethers/build/types'; /** * RetryProvider retries every RPC request if it detects a timeout-related issue on the server side. @@ -9,11 +8,16 @@ import { TransactionResponse } from 'zksync-ethers/build/types'; export class RetryProvider extends zksync.Provider { private readonly reporter: Reporter; - constructor( - url?: string | ethers.ethers.utils.ConnectionInfo | undefined, - network?: ethers.ethers.providers.Networkish | undefined, - reporter?: Reporter - ) { + constructor(_url?: string | { url: string; timeout: number }, network?: ethers.Networkish, reporter?: Reporter) { + let url; + if (typeof _url === 'object') { + const fetchRequest: ethers.FetchRequest = new ethers.FetchRequest(_url.url); + fetchRequest.timeout = _url.timeout; + url = fetchRequest; + } else { + url = _url; + } + super(url, network); this.reporter = reporter ??
new Reporter(); } @@ -51,21 +55,15 @@ export class RetryProvider extends zksync.Provider { } } - override _wrapTransaction(tx: ethers.Transaction, hash?: string): AugmentedTransactionResponse { - const wrapped = super._wrapTransaction(tx, hash); - const originalWait = wrapped.wait; - wrapped.wait = async (confirmations) => { - this.reporter.debug(`Started waiting for transaction ${tx.hash} (from=${tx.from}, nonce=${tx.nonce})`); - const receipt = await originalWait(confirmations); - this.reporter.debug( - `Obtained receipt for transaction ${tx.hash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` - ); - return receipt; - }; - return { ...wrapped, reporter: this.reporter }; + override _wrapTransactionReceipt(receipt: any): zksync.types.TransactionReceipt { + const wrapped = super._wrapTransactionReceipt(receipt); + this.reporter.debug( + `Obtained receipt for transaction ${receipt.transactionHash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` + ); + return wrapped; } } -export interface AugmentedTransactionResponse extends TransactionResponse { +export interface AugmentedTransactionResponse extends zksync.types.TransactionResponse { readonly reporter?: Reporter; } diff --git a/core/tests/ts-integration/src/test-master.ts b/core/tests/ts-integration/src/test-master.ts index 3072c3244e6b..09fddd1589ca 100644 --- a/core/tests/ts-integration/src/test-master.ts +++ b/core/tests/ts-integration/src/test-master.ts @@ -4,6 +4,7 @@ import { TestEnvironment, TestContext } from './types'; import { claimEtherBack } from './context-owner'; import { RetryProvider } from './retry-provider'; import { Reporter } from './reporter'; +import { bigIntReviver } from './helpers'; /** * Test master is a singleton class (per suite) that is capable of providing wallets to the suite. 
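Note: the `RetryProvider` rework above exists because ethers v6 removed `ConnectionInfo`; a per-request timeout now travels through `ethers.FetchRequest`, which the new constructor builds when handed the object form. A hypothetical instantiation under the test suite's relative import paths (URL and timeout values are made up):

```ts
import { RetryProvider } from './retry-provider';
import { Reporter } from './reporter';

// Object form: wrapped in an ethers.FetchRequest internally, so the 30s
// timeout applies to every RPC request this provider sends.
const l2Provider = new RetryProvider({ url: 'http://127.0.0.1:3050', timeout: 30_000 }, undefined, new Reporter());

// Plain string form still works and uses default fetch settings.
const plainProvider = new RetryProvider('http://127.0.0.1:3050');
```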
@@ -18,7 +19,7 @@ export class TestMaster { private readonly env: TestEnvironment; readonly reporter: Reporter; - private readonly l1Provider: ethers.providers.JsonRpcProvider; + private readonly l1Provider: ethers.JsonRpcProvider; private readonly l2Provider: zksync.Provider; private readonly mainWallet: zksync.Wallet; @@ -34,7 +35,7 @@ export class TestMaster { throw new Error('Test context was not initialized; unable to load context environment variable'); } - const context = JSON.parse(contextStr) as TestContext; + const context = JSON.parse(contextStr, bigIntReviver) as TestContext; this.env = context.environment; this.reporter = new Reporter(); @@ -51,7 +52,7 @@ export class TestMaster { if (!suiteWalletPK) { throw new Error(`Wallet for ${suiteName} suite was not provided`); } - this.l1Provider = new ethers.providers.JsonRpcProvider(this.env.l1NodeUrl); + this.l1Provider = new ethers.JsonRpcProvider(this.env.l1NodeUrl); this.l2Provider = new RetryProvider( { url: this.env.l2NodeUrl, diff --git a/core/tests/ts-integration/src/types.ts b/core/tests/ts-integration/src/types.ts index 14cf11cec14f..058dcd4929d9 100644 --- a/core/tests/ts-integration/src/types.ts +++ b/core/tests/ts-integration/src/types.ts @@ -1,5 +1,3 @@ -import { ethers } from 'ethers'; - export enum NodeMode { Main, External @@ -16,7 +14,7 @@ export enum DataAvailabityMode { export interface Token { name: string; symbol: string; - decimals: number; + decimals: bigint; l1Address: string; l2Address: string; } @@ -32,7 +30,7 @@ export interface TestEnvironment { /* * Gas limit for priority txs */ - priorityTxMaxGasLimit: number; + priorityTxMaxGasLimit: bigint; /* * Gas limit for computations */ @@ -40,7 +38,7 @@ export interface TestEnvironment { /* * Minimal gas price of l2 */ - minimalL2GasPrice: ethers.BigNumber; + minimalL2GasPrice: bigint; /* * Data availability mode */ @@ -52,7 +50,7 @@ export interface TestEnvironment { /** * Chain Id of the L2 Network */ - l2ChainId: number; + l2ChainId: bigint; /* * Mode of the l2 node */ @@ -120,7 +118,7 @@ export interface TestContext { } export interface Fee { - feeBeforeRefund: ethers.BigNumber; - feeAfterRefund: ethers.BigNumber; - refund: ethers.BigNumber; + feeBeforeRefund: bigint; + feeAfterRefund: bigint; + refund: bigint; } diff --git a/core/tests/ts-integration/tests/api/contract-verification.test.ts b/core/tests/ts-integration/tests/api/contract-verification.test.ts index 0a538b272464..c0cd887bcf7d 100644 --- a/core/tests/ts-integration/tests/api/contract-verification.test.ts +++ b/core/tests/ts-integration/tests/api/contract-verification.test.ts @@ -1,4 +1,4 @@ -import { TestMaster } from '../../src/index'; +import { TestMaster } from '../../src'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import fetch from 'node-fetch'; @@ -55,7 +55,7 @@ describe('Tests for the contract verification API', () => { const constructorArguments = counterContract.interface.encodeDeploy([]); const requestBody = { - contractAddress: counterContract.address, + contractAddress: await counterContract.getAddress(), contractName: 'contracts/counter/counter.sol:Counter', sourceCode: getContractSource('counter/counter.sol'), compilerZksolcVersion: ZKSOLC_VERSION, @@ -81,7 +81,7 @@ describe('Tests for the contract verification API', () => { const constructorArguments = counterContract.interface.encodeDeploy([]); const requestBody = { - contractAddress: counterContract.address, + contractAddress: await counterContract.getAddress(), contractName: 
'contracts/counter/counter.sol:Counter', sourceCode: getContractSource('counter/counter.sol'), compilerZksolcVersion: ZKSOLC_VERSION, @@ -102,7 +102,7 @@ describe('Tests for the contract verification API', () => { factoryDeps: [contracts.create.factoryDep] } }); - const importContract = await contractHandle.deployed(); + const importContract = await contractHandle.waitForDeployment(); const standardJsonInput = { language: 'Solidity', sources: { @@ -122,7 +122,7 @@ describe('Tests for the contract verification API', () => { const constructorArguments = importContract.interface.encodeDeploy([]); const requestBody = { - contractAddress: importContract.address, + contractAddress: await importContract.getAddress(), contractName: 'contracts/create/create.sol:Import', sourceCode: standardJsonInput, codeFormat: 'solidity-standard-json-input', @@ -149,7 +149,7 @@ describe('Tests for the contract verification API', () => { const contractFactory = new zksync.ContractFactory([], bytecode, alice); const deployTx = await contractFactory.deploy(); - const contractAddress = (await deployTx.deployed()).address; + const contractAddress = await (await deployTx.waitForDeployment()).getAddress(); const requestBody = { contractAddress, @@ -173,17 +173,17 @@ describe('Tests for the contract verification API', () => { contracts.greeter2.bytecode, alice ); - const randomAddress = ethers.utils.hexlify(ethers.utils.randomBytes(20)); + const randomAddress = ethers.hexlify(ethers.randomBytes(20)); const contractHandle = await contractFactory.deploy(randomAddress, { customData: { factoryDeps: [contracts.greeter2.factoryDep] } }); - const contract = await contractHandle.deployed(); + const contract = await contractHandle.waitForDeployment(); const constructorArguments = contract.interface.encodeDeploy([randomAddress]); const requestBody = { - contractAddress: contract.address, + contractAddress: await contract.getAddress(), contractName: 'Greeter2', sourceCode: { Greeter: getContractSource('vyper/Greeter.vy'), diff --git a/core/tests/ts-integration/tests/api/debug.test.ts b/core/tests/ts-integration/tests/api/debug.test.ts index 4982ebb8bb57..dd1ea141a419 100644 --- a/core/tests/ts-integration/tests/api/debug.test.ts +++ b/core/tests/ts-integration/tests/api/debug.test.ts @@ -34,7 +34,7 @@ describe('Debug methods', () => { const contractFactory = new zksync.ContractFactory([], bytecode, testMaster.mainAccount()); const deployTx = await contractFactory.deploy(); - const contractAddress = (await deployTx.deployed()).address; + const contractAddress = await (await deployTx.waitForDeployment()).getAddress(); let txCallTrace = await testMaster.mainAccount().provider.send('debug_traceCall', [ { to: contractAddress, @@ -43,7 +43,7 @@ describe('Debug methods', () => { ]); let expected = { error: null, - from: ethers.constants.AddressZero, + from: ethers.ZeroAddress, gas: expect.any(String), gasUsed: expect.any(String), input: expect.any(String), @@ -58,7 +58,7 @@ describe('Debug methods', () => { }); test('Debug sending erc20 token in a block', async () => { - const value = ethers.BigNumber.from(200); + const value = 200n; await aliceErc20.transfer(bob.address, value).then((tx: any) => tx.wait()); const tx = await aliceErc20.transfer(bob.address, value); const receipt = await tx.wait(); @@ -69,7 +69,7 @@ describe('Debug methods', () => { .mainAccount() .provider.send('debug_traceBlockByNumber', [receipt.blockNumber.toString(16), { tracer: 'callTracer' }]); const expectedTraceInBlock = { - from: ethers.constants.AddressZero, + 
from: ethers.ZeroAddress, gas: expect.any(String), gasUsed: expect.any(String), input: expect.any(String), @@ -88,14 +88,14 @@ describe('Debug methods', () => { const expected = { error: null, - from: ethers.constants.AddressZero, + from: ethers.ZeroAddress, gas: expect.any(String), gasUsed: expect.any(String), input: `0xa9059cbb000000000000000000000000${bob.address .slice(2, 42) .toLowerCase()}00000000000000000000000000000000000000000000000000000000000000${value - .toHexString() - .slice(2, 4)}`, + .toString(16) + .slice(0, 2)}`, // no 0x prefix output: '0x', revertReason: null, to: BOOTLOADER_FORMAL_ADDRESS, diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index 09f78ce75059..e78ec452b2f5 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -4,8 +4,7 @@ import { TestMaster } from '../../src'; import * as zksync from 'zksync-ethers'; import { types } from 'zksync-ethers'; -import { BigNumberish, ethers, Event } from 'ethers'; -import { serialize } from '@ethersproject/transactions'; +import * as ethers from 'ethers'; import { anyTransaction, deployContract, getTestContract, waitForNewL1Batch } from '../../src/helpers'; import { shouldOnlyTakeFee } from '../../src/modifiers/balance-checker'; import fetch, { RequestInit } from 'node-fetch'; @@ -20,14 +19,15 @@ const contracts = { counter: getTestContract('Counter'), events: getTestContract('Emitter'), outer: getTestContract('Outer'), - inner: getTestContract('Inner') + inner: getTestContract('Inner'), + stateOverride: getTestContract('StateOverrideTest') }; describe('web3 API compatibility tests', () => { let testMaster: TestMaster; let alice: zksync.Wallet; let l2Token: string; - let chainId: BigNumberish; + let chainId: bigint; beforeAll(async () => { testMaster = TestMaster.getInstance(__filename); @@ -41,20 +41,20 @@ describe('web3 API compatibility tests', () => { const blockNumberHex = '0x1'; // eth_getBlockByNumber - const blockHash = (await alice.provider.getBlock(blockNumber)).hash; - const blockWithTxsByNumber = await alice.provider.getBlockWithTransactions(blockNumber); - expect(blockWithTxsByNumber.gasLimit).bnToBeGt(0); - let sumTxGasUsed = ethers.BigNumber.from(0); + const blockHash = (await alice.provider.getBlock(blockNumber)).hash!; + const blockWithTxsByNumber = await alice.provider.getBlock(blockNumber, true); + expect(blockWithTxsByNumber.gasLimit).toBeGreaterThan(0n); + let sumTxGasUsed = 0n; - for (const tx of blockWithTxsByNumber.transactions) { + for (const tx of blockWithTxsByNumber.prefetchedTransactions) { const receipt = await alice.provider.getTransactionReceipt(tx.hash); - sumTxGasUsed = sumTxGasUsed.add(receipt.gasUsed); + sumTxGasUsed = sumTxGasUsed + receipt!.gasUsed; } - expect(blockWithTxsByNumber.gasUsed).bnToBeGte(sumTxGasUsed); + expect(blockWithTxsByNumber.gasUsed).toBeGreaterThanOrEqual(sumTxGasUsed); let expectedReceipts = []; - for (const tx of blockWithTxsByNumber.transactions) { + for (const tx of blockWithTxsByNumber.prefetchedTransactions) { const receipt = await alice.provider.send('eth_getTransactionReceipt', [tx.hash]); expectedReceipts.push(receipt); } @@ -64,16 +64,16 @@ describe('web3 API compatibility tests', () => { // eth_getBlockByHash await alice.provider.getBlock(blockHash); - const blockWithTxsByHash = await alice.provider.getBlockWithTransactions(blockHash); + const blockWithTxsByHash = await alice.provider.getBlock(blockHash, true); 
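Note: the calls above are the ethers v6 replacement for the removed `getBlockWithTransactions`: passing `true` as the second argument to `getBlock` prefetches full transaction objects, exposed as `prefetchedTransactions`. A condensed sketch of the pattern (the endpoint URL is illustrative):

```ts
import * as zksync from 'zksync-ethers';

// Sums gas used by all transactions in the latest block, assuming a local node.
async function sumLatestBlockGas(): Promise<bigint> {
    const provider = new zksync.Provider('http://127.0.0.1:3050');
    const block = await provider.getBlock('latest', true); // true => prefetch full txs
    let sumGasUsed = 0n;
    for (const tx of block.prefetchedTransactions) {
        const receipt = await provider.getTransactionReceipt(tx.hash);
        sumGasUsed += receipt!.gasUsed; // receipt lookups are nullable in v6
    }
    return sumGasUsed;
}
```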
expect(blockWithTxsByNumber.number).toEqual(blockWithTxsByHash.number); // eth_getBlockTransactionCountByNumber const txCountByNumber = await alice.provider.send('eth_getBlockTransactionCountByNumber', [blockNumberHex]); - expect(parseInt(txCountByNumber, 16)).toEqual(blockWithTxsByNumber.transactions.length); + expect(parseInt(txCountByNumber, 16)).toEqual(blockWithTxsByNumber.prefetchedTransactions.length); // eth_getBlockTransactionCountByHash const txCountByHash = await alice.provider.send('eth_getBlockTransactionCountByHash', [blockHash]); - expect(parseInt(txCountByHash, 16)).toEqual(blockWithTxsByNumber.transactions.length); + expect(parseInt(txCountByHash, 16)).toEqual(blockWithTxsByNumber.prefetchedTransactions.length); // eth_getTransactionByBlockNumberAndIndex const txByBlockNumberAndIndex = await alice.provider.send('eth_getTransactionByBlockNumberAndIndex', [ @@ -97,15 +97,15 @@ describe('web3 API compatibility tests', () => { const counterContract = await deployContract(alice, contracts.counter, []); // eth_getCode - const code = await alice.provider.getCode(counterContract.address); - expect(code).toEqual(ethers.utils.hexlify(contracts.counter.bytecode)); + const code = await alice.provider.getCode(await counterContract.getAddress()); + expect(code).toEqual(ethers.hexlify(contracts.counter.bytecode)); // eth_getStorageAt const accCodeStorageAddress = '0x0000000000000000000000000000000000008002'; - const codeKey = '0x000000000000000000000000' + counterContract.address.substring(2); - const codeHash = await alice.provider.getStorageAt(accCodeStorageAddress, codeKey); + const codeKey = '0x000000000000000000000000' + (await counterContract.getAddress()).substring(2); + const codeHash = await alice.provider.getStorage(accCodeStorageAddress, codeKey); - const expectedHash = ethers.utils.sha256(contracts.counter.bytecode); + const expectedHash = ethers.sha256(contracts.counter.bytecode); expect(codeHash.substring(10)).toEqual(expectedHash.substring(10)); }); @@ -116,11 +116,11 @@ describe('web3 API compatibility tests', () => { if (testMaster.environment().nodeMode === NodeMode.Main) { const balances = await alice.getAllBalances(); const tokenBalance = await alice.getBalance(l2Token); - expect(balances[l2Token.toLowerCase()].eq(tokenBalance)); + expect(balances[l2Token.toLowerCase()] == tokenBalance); } // zks_L1ChainId const l1ChainId = (await alice.providerL1!.getNetwork()).chainId; - const l1ChainIdFromL2Provider = await alice.provider.l1ChainId(); + const l1ChainIdFromL2Provider = BigInt(await alice.provider.l1ChainId()); expect(l1ChainId).toEqual(l1ChainIdFromL2Provider); // zks_getBlockDetails const blockDetails = await alice.provider.getBlockDetails(1); @@ -128,7 +128,7 @@ describe('web3 API compatibility tests', () => { expect(blockDetails.rootHash).toEqual(block.hash); expect(blockDetails.l1BatchNumber).toEqual(block.l1BatchNumber); // zks_getL1BatchDetails - const batchDetails = await alice.provider.getL1BatchDetails(block.l1BatchNumber); + const batchDetails = await alice.provider.getL1BatchDetails(block.l1BatchNumber!); expect(batchDetails.number).toEqual(block.l1BatchNumber); // zks_estimateFee const response = await alice.provider.send('zks_estimateFee', [ @@ -149,13 +149,26 @@ describe('web3 API compatibility tests', () => { await expect(alice.provider.send('net_version', [])).resolves.toMatch(chainId.toString()); }); + test('Should check the syncing status', async () => { + // We can't know whether the node is synced (in EN case), so we just check the validity of 
the response. + const response = await alice.provider.send('eth_syncing', []); + // Sync status is either `false` or an object with the following fields. + if (response !== false) { + const expectedObject = { + currentBlock: expect.stringMatching(HEX_VALUE_REGEX), + highestBlock: expect.stringMatching(HEX_VALUE_REGEX), + startingBlock: expect.stringMatching(HEX_VALUE_REGEX) + }; + expect(response).toMatchObject(expectedObject); + } + }); + // @ts-ignore test.each([ ['net_peerCount', [], '0x0'], ['net_listening', [], false], ['web3_clientVersion', [], 'zkSync/v2.0'], ['eth_protocolVersion', [], 'zks/1'], - ['eth_syncing', [], false], ['eth_accounts', [], []], ['eth_coinbase', [], '0x0000000000000000000000000000000000000000'], ['eth_getCompilers', [], []], @@ -190,33 +203,33 @@ describe('web3 API compatibility tests', () => { // We must get the receipt explicitly, because the receipt obtained via `tx.wait()` could resolve // *before* the batch was created and not have all the fields set. - const receipt = await alice.provider.getTransactionReceipt(tx.transactionHash); + const receipt = await alice.provider.getTransactionReceipt(tx.hash); const logs = await alice.provider.getLogs({ - fromBlock: receipt.blockNumber, - toBlock: receipt.blockNumber + fromBlock: receipt!.blockNumber, + toBlock: receipt!.blockNumber }); - const block = await alice.provider.getBlock(receipt.blockNumber); - const blockWithTransactions = await alice.provider.getBlockWithTransactions(receipt.blockNumber); - const tx1 = await alice.provider.getTransaction(tx.transactionHash); + const block = await alice.provider.getBlock(receipt!.blockNumber); + const blockWithTransactions = await alice.provider.getBlock(receipt!.blockNumber, true); + const tx1 = await alice.provider.getTransaction(tx.hash); expect(tx1.l1BatchNumber).toEqual(expect.anything()); // Can be anything except `null` or `undefined`. expect(tx1.l1BatchTxIndex).toEqual(expect.anything()); // Can be anything except `null` or `undefined`. - expect(tx1.chainId).toEqual(testMaster.environment().l2ChainId); + expect(tx1.chainId).toEqual(chainId); expect(tx1.type).toEqual(EIP1559_TX_TYPE); - expect(receipt.l1BatchNumber).toEqual(expect.anything()); // Can be anything except `null` or `undefined`. - expect(receipt.l1BatchTxIndex).toEqual(expect.anything()); // Can be anything except `null` or `undefined`. - expect(receipt.logs[0].l1BatchNumber).toEqual(receipt.l1BatchNumber); - expect(logs[0].l1BatchNumber).toEqual(receipt.l1BatchNumber); - expect(block.l1BatchNumber).toEqual(receipt.l1BatchNumber); + expect(receipt!.l1BatchNumber).toEqual(expect.anything()); // Can be anything except `null` or `undefined`. + expect(receipt!.l1BatchTxIndex).toEqual(expect.anything()); // Can be anything except `null` or `undefined`. + expect(receipt!.logs[0].l1BatchNumber).toEqual(receipt!.l1BatchNumber); + expect(logs[0].l1BatchNumber).toEqual(receipt!.l1BatchNumber); + expect(block.l1BatchNumber).toEqual(receipt!.l1BatchNumber); expect(block.l1BatchTimestamp).toEqual(expect.anything()); - expect(blockWithTransactions.l1BatchNumber).toEqual(receipt.l1BatchNumber); + expect(blockWithTransactions.l1BatchNumber).toEqual(receipt!.l1BatchNumber); expect(blockWithTransactions.l1BatchTimestamp).toEqual(expect.anything()); - blockWithTransactions.transactions.forEach((txInBlock, _) => { - expect(txInBlock.l1BatchNumber).toEqual(expect.anything()); // Can be anything except `null` or `undefined`. 
- expect(txInBlock.l1BatchTxIndex).toEqual(expect.anything()); // Can be anything except `null` or `undefined`. - expect(txInBlock.chainId).toEqual(testMaster.environment().l2ChainId); - expect([0, EIP712_TX_TYPE, PRIORITY_OPERATION_L2_TX_TYPE, EIP1559_TX_TYPE]).toContain(txInBlock.type); - }); + for (const tx of blockWithTransactions.prefetchedTransactions) { + expect(tx.l1BatchNumber).toEqual(expect.anything()); // Can be anything except `null` or `undefined`. + expect(tx.l1BatchTxIndex).toEqual(expect.anything()); // Can be anything except `null` or `undefined`. + expect(tx.chainId).toEqual(chainId); + expect([0, EIP712_TX_TYPE, PRIORITY_OPERATION_L2_TX_TYPE, EIP1559_TX_TYPE]).toContain(tx.type); + } }); test('Should check transactions from API / Legacy tx', async () => { @@ -228,7 +241,7 @@ describe('web3 API compatibility tests', () => { await legacyTx.wait(); const legacyApiReceipt = await alice.provider.getTransaction(legacyTx.hash); - expect(legacyApiReceipt.gasPrice).bnToBeLte(legacyTx.gasPrice!); + expect(legacyApiReceipt.gasPrice).toBeLessThanOrEqual(legacyTx.gasPrice!); }); test('Should check transactions from API / EIP1559 tx', async () => { @@ -240,8 +253,8 @@ describe('web3 API compatibility tests', () => { await eip1559Tx.wait(); const eip1559ApiReceipt = await alice.provider.getTransaction(eip1559Tx.hash); - expect(eip1559ApiReceipt.maxFeePerGas).bnToBeEq(eip1559Tx.maxFeePerGas!); - expect(eip1559ApiReceipt.maxPriorityFeePerGas).bnToBeEq(eip1559Tx.maxPriorityFeePerGas!); + expect(eip1559ApiReceipt.maxFeePerGas).toEqual(eip1559Tx.maxFeePerGas!); + expect(eip1559ApiReceipt.maxPriorityFeePerGas).toEqual(eip1559Tx.maxPriorityFeePerGas!); }); test('Should test getFilterChanges for pending transactions', async () => { @@ -296,7 +309,7 @@ describe('web3 API compatibility tests', () => { test('Should test pub-sub API: blocks', async () => { // Checks that we can receive an event for new block being created. - let wsProvider = new ethers.providers.WebSocketProvider(testMaster.environment().wsL2NodeUrl); + let wsProvider = new ethers.WebSocketProvider(testMaster.environment().wsL2NodeUrl); let newBlock: number | null = null; const currentBlock = await alice._providerL2().getBlockNumber(); @@ -331,7 +344,7 @@ describe('web3 API compatibility tests', () => { test('Should test pub-sub API: txs', async () => { // Checks that we can receive an event for new pending transactions. - let wsProvider = new ethers.providers.WebSocketProvider(testMaster.environment().wsL2NodeUrl); + let wsProvider = new ethers.WebSocketProvider(testMaster.environment().wsL2NodeUrl); // We're sending a few transfers from the wallet, so we'll use a new account to make event unique. let uniqueRecipient = testMaster.newEmptyAccount().address; @@ -352,7 +365,7 @@ describe('web3 API compatibility tests', () => { const tx = await alice.transfer({ to: uniqueRecipient, amount: 1, - token: zksync.utils.ETH_ADDRESS // With ERC20 "to" would be an address of the contract. + token: zksync.utils.L2_BASE_TOKEN_ADDRESS // With ERC20 "to" would be an address of the contract. }); let iterationsCount = 0; @@ -368,20 +381,20 @@ describe('web3 API compatibility tests', () => { test('Should test pub-sub API: events', async () => { // Checks that we can receive an event for events matching a certain filter. 
- let wsProvider = new ethers.providers.WebSocketProvider(testMaster.environment().wsL2NodeUrl); + let wsProvider = new ethers.WebSocketProvider(testMaster.environment().wsL2NodeUrl); let newEvent: Event | null = null; // We're sending a few transfers from the wallet, so we'll use a new account to make event unique. let uniqueRecipient = testMaster.newEmptyAccount().address; // Setup a filter for an ERC20 transfer. - const erc20TransferTopic = ethers.utils.id('Transfer(address,address,uint256)'); + const erc20TransferTopic = ethers.id('Transfer(address,address,uint256)'); let filter = { address: l2Token, topics: [ erc20TransferTopic, - ethers.utils.hexZeroPad(alice.address, 32), // Filter only transfers from this wallet., - ethers.utils.hexZeroPad(uniqueRecipient, 32) // Recipient + ethers.zeroPadValue(alice.address, 32), // Filter only transfers from this wallet., + ethers.zeroPadValue(uniqueRecipient, 32) // Recipient ] }; wsProvider.once(filter, (event) => { @@ -407,7 +420,7 @@ describe('web3 API compatibility tests', () => { await tryWait(iterationsCount++); } - expect((newEvent as any as Event).transactionHash).toEqual(tx.hash); + expect((newEvent as any).transactionHash).toEqual(tx.hash); await tx.wait(); // To not leave a hanging promise. wsProvider.removeAllListeners(); await wsProvider.destroy(); @@ -417,7 +430,7 @@ describe('web3 API compatibility tests', () => { const amount = 1; const token = l2Token; - const randomHash = ethers.utils.hexlify(ethers.utils.randomBytes(32)); + const randomHash = ethers.hexlify(ethers.randomBytes(32)); let status = await alice.provider.getTransactionStatus(randomHash); expect(status).toEqual(types.TransactionStatus.NotFound); @@ -453,7 +466,7 @@ describe('web3 API compatibility tests', () => { const amount = 1; const token = l2Token; - const randomHash = ethers.utils.hexlify(ethers.utils.randomBytes(32)); + const randomHash = ethers.hexlify(ethers.randomBytes(32)); let details = await alice.provider.getTransactionDetails(randomHash); expect(details).toEqual(null); @@ -476,14 +489,14 @@ describe('web3 API compatibility tests', () => { const receipt = await sentTx.wait(); expectedDetails.status = expect.stringMatching(/failed|included|verified/); - details = await alice.provider.getTransactionDetails(receipt.transactionHash); + details = await alice.provider.getTransactionDetails(receipt.hash); expect(details).toMatchObject(expectedDetails); if (!testMaster.isFastMode()) { // It's not worth it to wait for finalization in the API test. // If it works on localhost, it *must* work elsewhere. 
await sentTx.waitFinalize(); - details = await alice.provider.getTransactionDetails(receipt.transactionHash); + details = await alice.provider.getTransactionDetails(receipt.hash); expectedDetails.status = expect.stringMatching(/verified/); expect(details).toMatchObject(expectedDetails); } @@ -502,7 +515,7 @@ describe('web3 API compatibility tests', () => { }); const receipt = await sentTx.wait(); - let details = await alice.provider.getTransactionDetails(receipt.transactionHash); + let details = await alice.provider.getTransactionDetails(receipt.hash); let expectedDetails = { fee: expect.stringMatching(HEX_VALUE_REGEX), @@ -523,18 +536,18 @@ describe('web3 API compatibility tests', () => { const [from, to] = range!; for (let i = from; i <= to; i++) { - const block = await alice.provider.getBlockWithTransactions(i); + const block = await alice.provider.getBlock(i, true); expect(block.l1BatchNumber).toEqual(l1BatchNumber); expect(block.l1BatchTimestamp).toEqual(expect.anything()); expect(block.number).toEqual(i); - for (let tx of block.transactions) { + for (let tx of block.prefetchedTransactions) { expect(tx.blockNumber).toEqual(i); const receipt = await alice.provider.getTransactionReceipt(tx.hash); - expect(receipt.l1BatchNumber).toEqual(l1BatchNumber); + expect(receipt!.l1BatchNumber).toEqual(l1BatchNumber); } } - const prevBlock = await alice.provider.getBlockWithTransactions(from - 1); + const prevBlock = await alice.provider.getBlock(from - 1, true); expect(prevBlock.l1BatchNumber).toEqual(l1BatchNumber - 1); const nextBlock = await alice.provider.getBlock(to + 1); @@ -545,12 +558,12 @@ describe('web3 API compatibility tests', () => { // subscribe for events and then send transactions. However, this test // sometimes fails because one of the events was not received. Probably, there is // some problem in the pub-sub API that should be found & fixed. - test.skip('Should listen for human-readable events', async () => { + test('Should listen for human-readable events', async () => { const contract = await deployContract(alice, contracts.events, []); const blockNumber = await alice.provider.getBlockNumber(); - const deadbeef = ethers.utils.hexZeroPad('0xdeadbeef', 20); - const c0ffee = ethers.utils.hexZeroPad('0xc0ffee', 20); + const deadbeef = ethers.zeroPadValue('0xdeadbeef', 20); + const c0ffee = ethers.zeroPadValue('0xc0ffee', 20); const emitted = { trivial: 0, simple: 0, @@ -558,17 +571,18 @@ describe('web3 API compatibility tests', () => { }; contract.connect(alice); - contract - .on(contract.filters.Trivial(), () => ++emitted.trivial) - .on(contract.filters.Simple(), (_number: any, address: any) => { + ( + await ( + await contract.on(contract.filters.Trivial(), () => ++emitted.trivial) + ).on(contract.filters.Simple(), (_number: any, address: any) => { ++emitted.simple; expect(address.toLowerCase()).toEqual(deadbeef); }) - .on(contract.filters.Indexed(42), (number: any, address: any) => { - ++emitted.indexed; - expect(number.toNumber()).toEqual(42); - expect(address.toLowerCase()).toEqual(c0ffee); - }); + ).on(contract.filters.Indexed(42), (number: any, address: any) => { + ++emitted.indexed; + expect(number.toNumber()).toEqual(42); + expect(address.toLowerCase()).toEqual(c0ffee); + }); let tx = await contract.test(42); await tx.wait(); @@ -609,10 +623,10 @@ describe('web3 API compatibility tests', () => { test('Should check metamask interoperability', async () => { // Prepare "metamask" wallet. 
- const from = new MockMetamask(alice, testMaster.environment().l2ChainId); + const from = new MockMetamask(alice, chainId); const to = alice.address; - const web3Provider = new zksync.Web3Provider(from); - const signer = zksync.Signer.from(web3Provider.getSigner(), alice.provider); + const browserProvider = new zksync.BrowserProvider(from); + const signer = zksync.Signer.from(await browserProvider.getSigner(), Number(chainId), alice.provider); // Check to ensure that tx was correctly processed. const feeCheck = await shouldOnlyTakeFee(alice); @@ -666,19 +680,28 @@ describe('web3 API compatibility tests', () => { // There are around `0.5 * maxLogsLimit` logs in [tx1Receipt.blockNumber, tx1Receipt.blockNumber] range, // so query with such filter should succeed. - await expect(alice.provider.getLogs({ fromBlock: tx1Receipt.blockNumber, toBlock: tx1Receipt.blockNumber })) - .resolves; + await expect( + alice.provider.getLogs({ + fromBlock: tx1Receipt.blockNumber, + toBlock: tx1Receipt.blockNumber + }) + ).resolves; // There are at least `1.5 * maxLogsLimit` logs in [tx1Receipt.blockNumber, tx3Receipt.blockNumber] range, // so query with such filter should fail. await expect( - alice.provider.getLogs({ fromBlock: tx1Receipt.blockNumber, toBlock: tx3Receipt.blockNumber }) + alice.provider.getLogs({ + fromBlock: tx1Receipt.blockNumber, + toBlock: tx3Receipt.blockNumber + }) ).rejects.toThrow(`Query returned more than ${maxLogsLimit} results.`); }); test('Should throw error for estimate gas for account with balance < tx.value', async () => { let poorBob = testMaster.newEmptyAccount(); - expect(poorBob.estimateGas({ value: 1, to: alice.address })).toBeRejected('insufficient balance for transfer'); + expect( + poorBob.estimateGas({ value: 1, to: alice.address }) + ).toBeRejected(/*'insufficient balance for transfer'*/); }); test('Should check API returns correct block for every tag', async () => { @@ -690,6 +713,8 @@ describe('web3 API compatibility tests', () => { expect(+finalizedBlock.number!).toEqual(expect.any(Number)); const latestBlock = await alice.provider.send('eth_getBlockByNumber', ['latest', true]); expect(+latestBlock.number!).toEqual(expect.any(Number)); + const l1CommittedBlock = await alice.provider.send('eth_getBlockByNumber', ['l1_committed', true]); + expect(+l1CommittedBlock.number!).toEqual(expect.any(Number)); const pendingBlock = await alice.provider.send('eth_getBlockByNumber', ['pending', true]); expect(pendingBlock).toEqual(null); }); @@ -698,12 +723,12 @@ describe('web3 API compatibility tests', () => { const gasPrice = await alice.provider.getGasPrice(); const chainId = (await alice.provider.getNetwork()).chainId; const address = zksync.Wallet.createRandom().address; - const senderNonce = await alice.getTransactionCount(); - const tx: ethers.providers.TransactionRequest = { + const senderNonce = await alice.getNonce(); + const tx: ethers.TransactionRequest = { to: address, from: alice.address, nonce: senderNonce, - gasLimit: ethers.BigNumber.from(300000), + gasLimit: 300000n, gasPrice, data: '0x', value: 0, @@ -732,14 +757,40 @@ describe('web3 API compatibility tests', () => { address: l2Token, topics: [ '0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef', - ethers.utils.hexZeroPad(alice.address, 32), - ethers.utils.hexZeroPad(uniqueRecipient, 32) + ethers.zeroPadValue(alice.address, 32), + ethers.zeroPadValue(uniqueRecipient, 32) ] }); expect(logs).toHaveLength(1); expect(logs[0].transactionHash).toEqual(tx.hash); }); + test('Should check getLogs 
+        // We're sending a transfer from the wallet, so we'll use a new account to make event unique.
+        let uniqueRecipient = testMaster.newEmptyAccount().address;
+        const tx = await alice.transfer({
+            to: uniqueRecipient,
+            amount: 1,
+            token: l2Token
+        });
+        const receipt = await tx.wait();
+        const response = await alice.provider.send('eth_getLogs', [
+            {
+                fromBlock: ethers.toBeHex(receipt.blockNumber),
+                toBlock: ethers.toBeHex(receipt.blockNumber),
+                address: l2Token,
+                topics: [
+                    '0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef',
+                    ethers.zeroPadValue(alice.address, 32),
+                    ethers.zeroPadValue(uniqueRecipient, 32)
+                ]
+            }
+        ]);
+        expect(response).toHaveLength(1);
+        // TODO: switch to provider.getLogs once blockTimestamp is added to zksync ethers.js
+        expect(response[0].blockTimestamp).toBeDefined();
+    });
+
     test('Should check getLogs endpoint works properly with block tags', async () => {
         const earliestLogs = alice.provider.send('eth_getLogs', [
             {
@@ -787,16 +838,10 @@ describe('web3 API compatibility tests', () => {
                 toBlock: latestBlock.number
             })
         ).map((x) => {
-            x.l1BatchNumber = 0; // Set bogus value.
-            return x;
+            return new zksync.types.Log({ ...x, l1BatchNumber: 0 }, alice.provider); // Set bogus value.
         });
-        const getLogsByHash = (
-            await alice.provider.getLogs({
-                blockHash: latestBlock.hash
-            })
-        ).map((x) => {
-            x.l1BatchNumber = 0; // Set bogus value.
-            return x;
+        const getLogsByHash = (await alice.provider.getLogs({ blockHash: latestBlock.hash || undefined })).map((x) => {
+            return new zksync.types.Log({ ...x, l1BatchNumber: 0 }, alice.provider); // Set bogus value.
         });
         await expect(getLogsByNumber).toEqual(getLogsByHash);

@@ -805,25 +850,21 @@
             alice.provider.getLogs({
                 fromBlock: latestBlock.number,
                 toBlock: latestBlock.number,
-                blockHash: latestBlock.hash
+                blockHash: latestBlock.hash || undefined
             })
-        ).rejects.toThrow(`invalid filter: if blockHash is supplied fromBlock and toBlock must not be`);
+        ).rejects.toThrow(`invalid filter`);
     });

     test('Should check eth_feeHistory', async () => {
         const receipt = await anyTransaction(alice);
-        const response = await alice.provider.send('eth_feeHistory', [
-            '0x2',
-            ethers.utils.hexlify(receipt.blockNumber),
-            []
-        ]);
+        const response = await alice.provider.send('eth_feeHistory', ['0x2', ethers.toBeHex(receipt.blockNumber), []]);

-        expect(ethers.BigNumber.from(response.oldestBlock).toNumber()).toEqual(receipt.blockNumber - 1);
+        expect(parseInt(response.oldestBlock)).toEqual(receipt.blockNumber - 1);

         expect(response.baseFeePerGas).toHaveLength(3);
         for (let i = 0; i < 2; i += 1) {
             const expectedBaseFee = (await alice.provider.getBlock(receipt.blockNumber - 1 + i)).baseFeePerGas;
-            expect(ethers.BigNumber.from(response.baseFeePerGas[i])).toEqual(expectedBaseFee);
+            expect(BigInt(response.baseFeePerGas[i])).toEqual(expectedBaseFee);
         }
     });

@@ -857,83 +898,312 @@ describe('web3 API compatibility tests', () => {
         expect(exactProtocolVersion).toMatchObject(expectedProtocolVersion);
     });

-    test('Should check transaction signature', async () => {
-        const CHAIN_ID = testMaster.environment().l2ChainId;
+    test('Should check transaction signature for legacy transaction type', async () => {
         const value = 1;
         const gasLimit = 350000;
         const gasPrice = await alice.provider.getGasPrice();
         const data = '0x';
         const to = alice.address;
-        let tx_handle;
-        let txFromApi;
-        let signerAddr;
-
-        // check for legacy transaction type
         const LEGACY_TX_TYPE = 0;
         const legacyTxReq = {
             type: LEGACY_TX_TYPE,
             to,
             value,
-            chainId: CHAIN_ID,
+            chainId,
             gasLimit,
             gasPrice,
             data,
-            nonce: await alice.getTransactionCount()
+            nonce: await alice.getNonce()
         };
         const signedLegacyTx = await alice.signTransaction(legacyTxReq);
-        tx_handle = await alice.provider.sendTransaction(signedLegacyTx);
+        const tx_handle = await alice.provider.broadcastTransaction(signedLegacyTx);
         await tx_handle.wait();

-        txFromApi = await alice.provider.getTransaction(tx_handle.hash);
+        const txFromApi = await alice.provider.getTransaction(tx_handle.hash);

-        const serializedLegacyTxReq = ethers.utils.serializeTransaction(legacyTxReq);
+        const serializedLegacyTxReq = ethers.Transaction.from(legacyTxReq).unsignedSerialized;

         // check that API returns correct signature values for the given transaction
         // by invoking recoverAddress() method with the serialized transaction and signature values
-        signerAddr = ethers.utils.recoverAddress(ethers.utils.keccak256(serializedLegacyTxReq), {
-            r: txFromApi.r!,
-            s: txFromApi.s!,
-            v: txFromApi.v!
-        });
+        const signerAddr = ethers.recoverAddress(ethers.keccak256(serializedLegacyTxReq), txFromApi.signature);
         expect(signerAddr).toEqual(alice.address);

-        const expectedV = 35 + CHAIN_ID! * 2;
-        expect(Math.abs(txFromApi.v! - expectedV) <= 1).toEqual(true);
+        const expectedV = 35n + BigInt(chainId) * 2n;
+        const actualV = ethers.Signature.getChainIdV(chainId, txFromApi.signature.v);
+        expect(actualV === expectedV);
+    });
+
+    test('Should check transaction signature for EIP1559 transaction type', async () => {
+        const value = 1;
+        const gasLimit = 350000;
+        const gasPrice = await alice.provider.getGasPrice();
+        const data = '0x';
+        const to = alice.address;

-        // check for EIP1559 transaction type
         const EIP1559_TX_TYPE = 2;
         const eip1559TxReq = {
             type: EIP1559_TX_TYPE,
             to,
             value,
-            chainId: CHAIN_ID,
+            chainId,
             gasLimit,
             data,
-            nonce: await alice.getTransactionCount(),
+            nonce: await alice.getNonce(),
             maxFeePerGas: gasPrice,
             maxPriorityFeePerGas: gasPrice
         };

         const signedEip1559TxReq = await alice.signTransaction(eip1559TxReq);
-        tx_handle = await alice.provider.sendTransaction(signedEip1559TxReq);
+        const tx_handle = await alice.provider.broadcastTransaction(signedEip1559TxReq);
         await tx_handle.wait();

-        txFromApi = await alice.provider.getTransaction(tx_handle.hash);
+        const txFromApi = await alice.provider.getTransaction(tx_handle.hash);

-        const serializedEip1559TxReq = ethers.utils.serializeTransaction(eip1559TxReq);
+        const serializedEip1559TxReq = ethers.Transaction.from(eip1559TxReq).unsignedSerialized;

         // check that API returns correct signature values for the given transaction
         // by invoking recoverAddress() method with the serialized transaction and signature values
-        signerAddr = ethers.utils.recoverAddress(ethers.utils.keccak256(serializedEip1559TxReq), {
-            r: txFromApi.r!,
-            s: txFromApi.s!,
-            v: txFromApi.v!
-        });
+        const signerAddr = ethers.recoverAddress(ethers.keccak256(serializedEip1559TxReq), txFromApi.signature);
         expect(signerAddr).toEqual(alice.address);
-        expect(txFromApi.v! <= 1).toEqual(true);
+        expect(txFromApi.signature.v! === 27 || 28);
     });

+    describe('Storage override', () => {
+        test('Should be able to estimate_gas overriding the balance of the sender', async () => {
+            const balance = await alice.getBalance();
+            const amount = balance + 1n;
+
+            // Expect the transaction to be reverted without the overridden balance
+            await expect(
+                alice.provider.estimateGas({
+                    from: alice.address,
+                    to: alice.address,
+                    value: amount.toString()
+                })
+            ).toBeRejected();
+
+            // Call estimate_gas overriding the balance of the sender using the eth_estimateGas endpoint
+            const response = await alice.provider.send('eth_estimateGas', [
+                {
+                    from: alice.address,
+                    to: alice.address,
+                    value: amount.toString()
+                },
+                'latest',
+                // Override with the balance needed to send the transaction
+                {
+                    [alice.address]: {
+                        balance: amount.toString()
+                    }
+                }
+            ]);
+
+            // Assert that the response is successful
+            expect(response).toEqual(expect.stringMatching(HEX_VALUE_REGEX));
+        });
+
+        test('Should be able to estimate_gas overriding contract code', async () => {
+            // Deploy the first contract
+            const contract1 = await deployContract(alice, contracts.events, []);
+            const contract1Address = await contract1.getAddress();
+
+            // Deploy the second contract to extract the code that we are overriding the estimation with
+            const contract2 = await deployContract(alice, contracts.counter, []);
+            const contract2Address = await contract2.getAddress();
+
+            // Get the code of contract2
+            const code = await alice.provider.getCode(contract2Address);
+
+            // Get the calldata of the increment function of contract2
+            const incrementFunctionData = contract2.interface.encodeFunctionData('increment', [1]);
+
+            // Assert that the estimation fails because the increment function is not present in contract1
+            expect(
+                alice.provider.estimateGas({
+                    to: contract1Address.toString(),
+                    data: incrementFunctionData
+                })
+            ).toBeRejected();
+
+            // Call estimate_gas overriding the code of contract1 with the code of contract2 using the eth_estimateGas endpoint
+            const response = await alice.provider.send('eth_estimateGas', [
+                {
+                    from: alice.address,
+                    to: contract1Address.toString(),
+                    data: incrementFunctionData
+                },
+                'latest',
+                { [contract1Address.toString()]: { code: code } }
+            ]);
+
+            // Assert that the response is successful
+            expect(response).toEqual(expect.stringMatching(HEX_VALUE_REGEX));
+        });
+
+        test('Should estimate gas by overriding state with State', async () => {
+            const contract = await deployContract(alice, contracts.stateOverride, []);
+            const contractAddress = await contract.getAddress();
+
+            const sumValuesFunctionData = contract.interface.encodeFunctionData('sumValues', []);
+
+            // Ensure that the initial gas estimation fails due to contract requirements
+            await expect(
+                alice.provider.estimateGas({
+                    to: contractAddress.toString(),
+                    data: sumValuesFunctionData
+                })
+            ).toBeRejected();
+
+            // Override the entire contract state using State
+            const state = {
+                [contractAddress.toString()]: {
+                    state: {
+                        '0x0000000000000000000000000000000000000000000000000000000000000000':
+                            '0x0000000000000000000000000000000000000000000000000000000000000001',
+                        '0x0000000000000000000000000000000000000000000000000000000000000001':
+                            '0x0000000000000000000000000000000000000000000000000000000000000002'
+                    }
+                }
+            };
+
+            const response = await alice.provider.send('eth_estimateGas', [
+                {
+                    from: alice.address,
+                    to: contractAddress.toString(),
+                    data: sumValuesFunctionData
+                },
+                'latest',
+                state
+            ]);
+
+            expect(response).toEqual(expect.stringMatching(HEX_VALUE_REGEX));
+        });
+
+        test('Should estimate gas by overriding state with StateDiff', async () => {
+            const contract = await deployContract(alice, contracts.stateOverride, []);
+            const contractAddress = await contract.getAddress();
+            const incrementFunctionData = contract.interface.encodeFunctionData('increment', [1]);
+
+            // Ensure that the initial gas estimation fails due to contract requirements
+            await expect(
+                alice.provider.estimateGas({
+                    to: contractAddress.toString(),
+                    data: incrementFunctionData
+                })
+            ).toBeRejected();
+
+            // Override the contract state using StateDiff
+            const stateDiff = {
+                [contractAddress.toString()]: {
+                    stateDiff: {
+                        '0x0000000000000000000000000000000000000000000000000000000000000000':
+                            '0x0000000000000000000000000000000000000000000000000000000000000001'
+                    }
+                }
+            };
+
+            const response = await alice.provider.send('eth_estimateGas', [
+                {
+                    from: alice.address,
+                    to: contractAddress.toString(),
+                    data: incrementFunctionData
+                },
+                'latest',
+                stateDiff
+            ]);
+
+            expect(response).toEqual(expect.stringMatching(HEX_VALUE_REGEX));
+        });
+
+        test('Should call and succeed with overriding state with State', async () => {
+            const contract = await deployContract(alice, contracts.stateOverride, []);
+            const contractAddress = await contract.getAddress();
+            const sumValuesFunctionData = contract.interface.encodeFunctionData('sumValues', []);
+
+            // Ensure that the initial call fails due to contract requirements
+            await alice.provider
+                .call({
+                    to: contractAddress.toString(),
+                    data: sumValuesFunctionData
+                })
+                .catch((error) => {
+                    const errorString = 'Initial state not set';
+                    expect(error.message).toContain(errorString);
+                });
+
+            // Override the contract state using State
+            const state = {
+                [contractAddress.toString()]: {
+                    state: {
+                        '0x0000000000000000000000000000000000000000000000000000000000000000':
+                            '0x0000000000000000000000000000000000000000000000000000000000000001',
+                        '0x0000000000000000000000000000000000000000000000000000000000000001':
+                            '0x0000000000000000000000000000000000000000000000000000000000000002'
+                    }
+                }
+            };
+
+            const response = await alice.provider.send('eth_call', [
+                {
+                    from: alice.address,
+                    to: contractAddress.toString(),
+                    data: sumValuesFunctionData
+                },
+                'latest',
+                state
+            ]);
+
+            // The state replaces the entire state of the contract, so the sum now would be
+            // 1 (0x1) + 2 (0x2) = 3 (0x3)
+            expect(response).toEqual('0x0000000000000000000000000000000000000000000000000000000000000003');
+        });
+
+        test('Should call and succeed with overriding state with StateDiff', async () => {
+            const contract = await deployContract(alice, contracts.stateOverride, []);
+            const contractAddress = await contract.getAddress();
+            const sumValuesFunctionData = contract.interface.encodeFunctionData('sumValues', []);
+
+            // Ensure that the initial call fails due to contract requirements
+            await alice.provider
+                .call({
+                    to: contractAddress.toString(),
+                    data: sumValuesFunctionData
+                })
+                .catch((error) => {
+                    const errorString = 'Initial state not set';
+                    expect(error.message).toContain(errorString);
+                });
+
+            // Override the contract state using StateDiff
+            const stateDiff = {
+                [contractAddress.toString()]: {
+                    stateDiff: {
+                        '0x0000000000000000000000000000000000000000000000000000000000000000':
+                            '0x0000000000000000000000000000000000000000000000000000000000000001',
+                        '0x0000000000000000000000000000000000000000000000000000000000000001':
+                            '0x0000000000000000000000000000000000000000000000000000000000000002'
+                    }
+                }
+            };
+
+            const response = await alice.provider.send('eth_call', [
+                {
+                    from: alice.address,
+                    to: contractAddress.toString(),
+                    data: sumValuesFunctionData
+                },
+                'latest',
+                stateDiff
+            ]);
+
+            // The stateDiff only changes the specific slots provided in the override.
+            // The initial value of the storage slot at key 0x2 remains unchanged, which is 100 (0x64 in hex).
+            // Therefore, the sum of the values at the three storage slots is:
+            // 1 (0x1) + 2 (0x2) + 100 (0x64) = 103 (0x67 in hex).
+            // This is why the expected response is 0x67.
+            expect(response).toEqual('0x0000000000000000000000000000000000000000000000000000000000000067');
+        });
+    });

     // We want to be sure that the correct (outer) contract address is returned in the transaction receipt,
     // when there is a contract that initializes another contract in the constructor
     test('Should check inner-outer contract address in the receipt of the deploy tx', async () => {
@@ -949,11 +1219,11 @@ describe('web3 API compatibility tests', () => {
             }
         };
         const outerContract = await deployContract(alice, contracts.outer, [1], undefined, outerContractOverrides);
-        let receipt = await outerContract.deployTransaction.wait();
+        const contract = await outerContract.waitForDeployment();

-        const deployedBytecode = await alice.provider.getCode(receipt.contractAddress);
+        const deployedBytecode = await alice.provider.getCode(await contract.getAddress());

-        expect(expectedAddress).toEqual(receipt.contractAddress);
+        expect(expectedAddress).toEqual(await contract.getAddress());
         expect(expectedBytecode).toEqual(deployedBytecode);
     });

@@ -980,8 +1250,8 @@ export class MockMetamask {
     readonly isMetaMask: boolean = true;
     readonly chainId: string;

-    constructor(readonly wallet: zksync.Wallet, readonly networkVersion: number) {
-        this.chainId = ethers.utils.hexlify(networkVersion);
+    constructor(readonly wallet: zksync.Wallet, readonly networkVersion: bigint) {
+        this.chainId = ethers.toBeHex(networkVersion);
     }

     // EIP-1193
@@ -1004,19 +1274,18 @@ export class MockMetamask {
                 delete tx.gas;
                 let populated = {
                     ...(await this.wallet.populateTransaction(tx)),
-                    nonce: await this.wallet.getTransactionCount()
+                    nonce: await this.wallet.getNonce()
                 };
                 delete populated.from;
-                const signature = this.wallet._signingKey().signDigest(ethers.utils.keccak256(serialize(populated)));
-                const signed = serialize(populated, signature);
-                const response = await this.wallet.provider.sendTransaction(signed);
+                const signed = await this.wallet.signTransaction(populated);
+                const response = await this.wallet.provider.broadcastTransaction(signed);
                 return response.hash;
             case 'eth_getTransactionCount':
-                return this.wallet.getTransactionCount();
+                return this.wallet.getNonce();
            case 'eth_signTypedData_v4':
                let payload = JSON.parse(params[1]);
                delete payload.types.EIP712Domain;
-                return this.wallet._signTypedData(payload.domain, payload.types, payload.message);
+                return this.wallet.signTypedData(payload.domain, payload.types, payload.message);
            default:
                // unfortunately though, metamask does not forward methods from zks_ namespace
                if (method.startsWith('zks')) {
diff --git a/core/tests/ts-integration/tests/base-token.test.ts b/core/tests/ts-integration/tests/base-token.test.ts
index 00c7196ea854..51d88f7dd52a 100644
--- a/core/tests/ts-integration/tests/base-token.test.ts
+++ b/core/tests/ts-integration/tests/base-token.test.ts
@@ -2,11 +2,11 @@
  * This suite contains tests checking default ERC-20 contract behavior.
  */

-import { TestMaster } from '../src/index';
+import { TestMaster } from '../src';
 import { Token } from '../src/types';

 import * as zksync from 'zksync-ethers';
-import { BigNumber, utils as etherUtils } from 'ethers';
+import * as ethers from 'ethers';
 import { scaledGasPrice } from '../src/helpers';

 describe('base ERC20 contract checks', () => {
@@ -27,8 +27,8 @@ describe('base ERC20 contract checks', () => {
     });

     test('Can perform a deposit', async () => {
-        const amount = 1; // 1 wei is enough.
-        const gasPrice = scaledGasPrice(alice);
+        const amount = 1n; // 1 wei is enough.
+        const gasPrice = await scaledGasPrice(alice);

         const initialEthBalance = await alice.getBalanceL1();
         const initialL1Balance = await alice.getBalanceL1(baseTokenDetails.l1Address);
@@ -53,23 +53,26 @@ describe('base ERC20 contract checks', () => {
         await depositTx.wait();

         const receipt = await alice._providerL1().getTransactionReceipt(depositHash);
-        const fee = receipt.effectiveGasPrice.mul(receipt.gasUsed);
+        if (!receipt) {
+            throw new Error('No receipt for deposit');
+        }
+        const fee = receipt.gasPrice * receipt.gasUsed;

         // TODO: should all the following tests use strict equality?

         const finalEthBalance = await alice.getBalanceL1();
-        expect(initialEthBalance).bnToBeGt(finalEthBalance.add(fee)); // Fee should be taken from the ETH balance on L1.
+        expect(initialEthBalance).toBeGreaterThan(finalEthBalance + fee); // Fee should be taken from the ETH balance on L1.

         const finalL1Balance = await alice.getBalanceL1(baseTokenDetails.l1Address);
-        expect(initialL1Balance).bnToBeGte(finalL1Balance.add(amount));
+        expect(initialL1Balance).toBeGreaterThanOrEqual(finalL1Balance + amount);

         const finalL2Balance = await alice.getBalance();
-        expect(initialL2Balance).bnToBeLte(finalL2Balance.add(amount));
+        expect(initialL2Balance).toBeLessThanOrEqual(finalL2Balance + amount);
     });

     test('Not enough balance should revert', async () => {
-        const amount = BigNumber.from('0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffff');
-        const gasPrice = scaledGasPrice(alice);
+        const amount = BigInt('0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffff');
+        const gasPrice = await scaledGasPrice(alice);
         let errorMessage;

         await expect(
@@ -92,7 +95,7 @@ describe('base ERC20 contract checks', () => {
     });

     test('Can perform a transfer to self', async () => {
-        const amount = BigNumber.from(200);
+        const amount = 200n;

         const initialAliceBalance = await alice.getBalance();

@@ -107,14 +110,14 @@ describe('base ERC20 contract checks', () => {
         await transferTx.waitFinalize();

         const receipt = await alice._providerL2().getTransactionReceipt(transferTx.hash);
-        const fee = receipt.effectiveGasPrice.mul(receipt.gasUsed);
+        const fee = receipt!.gasPrice * receipt!.gasUsed;

         const finalAliceBalance = await alice.getBalance();
-        expect(initialAliceBalance.sub(fee)).bnToBeEq(finalAliceBalance);
+        expect(initialAliceBalance - fee).toEqual(finalAliceBalance);
     });

     test('Incorrect transfer should revert', async () => {
-        const amount = etherUtils.parseEther('1000000.0');
+        const amount = ethers.parseEther('1000000.0');

         const initialAliceBalance = await alice.getBalance();
         const initialBobBalance = await bob.getBalance();
@@ -131,15 +134,15 @@ describe('base ERC20 contract checks', () => {
         const finalAliceBalance = await alice.getBalance();
         const finalBobBalance = await bob.getBalance();

-        await expect(finalAliceBalance).bnToBeEq(initialAliceBalance);
-        await expect(finalBobBalance).bnToBeEq(initialBobBalance);
+        await expect(finalAliceBalance).toEqual(initialAliceBalance);
+        await expect(finalBobBalance).toEqual(initialBobBalance);
     });

     test('Can perform a withdrawal', async () => {
         if (testMaster.isFastMode() || isETHBasedChain) {
             return;
         }
-        const amount = 1;
+        const amount = 1n;

         const initialL1Balance = await alice.getBalanceL1(baseTokenDetails.l1Address);
         const initialL2Balance = await alice.getBalance();
@@ -151,13 +154,13 @@ describe('base ERC20 contract checks', () => {
         await expect(alice.finalizeWithdrawal(withdrawalTx.hash)).toBeAccepted([]);

         const receipt = await alice._providerL2().getTransactionReceipt(withdrawalTx.hash);
-        const fee = receipt.effectiveGasPrice.mul(receipt.gasUsed);
+        const fee = receipt!.gasPrice * receipt!.gasUsed;

         const finalL1Balance = await alice.getBalanceL1(baseTokenDetails.l1Address);
         const finalL2Balance = await alice.getBalance();

-        expect(finalL1Balance).bnToBeEq(initialL1Balance.add(amount));
-        expect(finalL2Balance.add(amount).add(fee)).bnToBeEq(initialL2Balance);
+        expect(finalL1Balance).toEqual(initialL1Balance + amount);
+        expect(finalL2Balance + amount + fee).toEqual(initialL2Balance);
     });

     afterAll(async () => {
diff --git a/core/tests/ts-integration/tests/contracts.test.ts b/core/tests/ts-integration/tests/contracts.test.ts
index 08fef555af0c..5ac87b71b684 100644
--- a/core/tests/ts-integration/tests/contracts.test.ts
+++ b/core/tests/ts-integration/tests/contracts.test.ts
@@ -6,13 +6,12 @@
  * Let's try to keep only relatively simple and self-contained tests here.
  */

-import { TestMaster } from '../src/index';
+import { TestMaster } from '../src';
 import { deployContract, getTestContract, waitForNewL1Batch } from '../src/helpers';
 import { shouldOnlyTakeFee } from '../src/modifiers/balance-checker';

 import * as ethers from 'ethers';
 import * as zksync from 'zksync-ethers';
-import { Provider } from 'zksync-ethers';
 import * as elliptic from 'elliptic';
 import { RetryProvider } from '../src/retry-provider';

@@ -47,27 +46,27 @@ describe('Smart contract behavior checks', () => {
         const feeCheck = await shouldOnlyTakeFee(alice);

         // Change the storage slot and ensure it actually changes.
-        expect(counterContract.get()).resolves.bnToBeEq(0);
+        expect(counterContract.get()).resolves.toEqual(0n);
         await expect(counterContract.increment(42)).toBeAccepted([feeCheck]);
-        expect(counterContract.get()).resolves.bnToBeEq(42);
+        expect(counterContract.get()).resolves.toEqual(42n);
     });

     test('Should deploy contract with a constructor', async () => {
         const contract1 = await deployContract(alice, contracts.constructor, [2, 3, false]);
-        await expect(contract1.get()).resolves.bnToBeEq(2 * 3);
+        await expect(contract1.get()).resolves.toEqual(2n * 3n);

         const contract2 = await deployContract(alice, contracts.constructor, [5, 10, false]);
-        await expect(contract2.get()).resolves.bnToBeEq(5 * 10);
+        await expect(contract2.get()).resolves.toEqual(5n * 10n);
     });

     test('Should deploy contract with create', async () => {
         const contractFactory = new zksync.ContractFactory(contracts.create.abi, contracts.create.bytecode, alice);
-        const contract = await contractFactory.deploy({
+        const contract = (await contractFactory.deploy({
             customData: {
                 factoryDeps: [contracts.create.factoryDep]
             }
-        });
-        await contract.deployed();
+        })) as zksync.Contract;
+        await contract.waitForDeployment();
         await expect(contract.getFooName()).resolves.toBe('Foo');
     });

@@ -80,7 +79,7 @@ describe('Smart contract behavior checks', () => {

         // Second, check that processable transaction may fail with "out of gas" error.
         // To do so, we estimate gas for arg "1" and supply it to arg "20".
         // This guarantees that transaction won't fail during verification.
-        const lowGasLimit = await expensiveContract.estimateGas.expensive(1);
+        const lowGasLimit = await expensiveContract.expensive.estimateGas(1);
         await expect(
             expensiveContract.expensive(20, {
                 gasLimit: lowGasLimit
@@ -114,42 +113,66 @@ describe('Smart contract behavior checks', () => {

         // The tx has been reverted, so the value Should not have been changed:
         const newValue = await counterContract.get();
-        expect(newValue).bnToBeEq(prevValue, 'The counter has changed despite the revert');
+        expect(newValue).toEqual(prevValue); // The counter has changed despite the revert
     });

     test('Should not allow invalid constructor calldata', async () => {
         const randomWrongArgs = [12, 12, true];
-        await expect(deployContract(alice, contracts.counter, randomWrongArgs)).toBeRejected('too many arguments');
+        await expect(deployContract(alice, contracts.counter, randomWrongArgs)).toBeRejected(
+            'incorrect number of arguments to constructor'
+        );
     });

     test('Should not allow invalid contract bytecode', async () => {
         // In this test we ensure that bytecode validity is checked by server.

         // Helpers to interact with the RPC API directly.
-        const send = (tx: any) => alice.provider.send('eth_sendRawTransaction', [zksync.utils.serialize(tx)]);
-        const call = (tx: any) => alice.provider.send('eth_call', [Provider.hexlifyTransaction(tx)]);
-        const estimateGas = (tx: any) => alice.provider.send('eth_estimateGas', [Provider.hexlifyTransaction(tx)]);
+        const send = (tx: any) => alice.provider.send('eth_sendRawTransaction', [zksync.utils.serializeEip712(tx)]);
+        const call = (tx: any) => alice.provider.send('eth_call', [alice.provider.getRpcTransaction(tx)]);
+        const estimateGas = (tx: any) => alice.provider.send('eth_estimateGas', [alice.provider.getRpcTransaction(tx)]);
         // Prepares an invalid serialized transaction with the bytecode of provided length.
         const invalidTx = (length: number) => invalidBytecodeTestTransaction(alice.provider, [new Uint8Array(length)]);

         const txWithUnchunkableBytecode = await invalidTx(17);
         const unchunkableError = 'Bytecode length is not divisible by 32';
         await expect(send(txWithUnchunkableBytecode)).toBeRejected(unchunkableError);
-        await expect(call(txWithUnchunkableBytecode)).toBeRejected(unchunkableError);
-        await expect(estimateGas(txWithUnchunkableBytecode)).toBeRejected(unchunkableError);
+
+        /*
+            Ethers v6 error handling is not capable of handling this format of messages.
+            See: https://github.com/ethers-io/ethers.js/blob/main/src.ts/providers/provider-jsonrpc.ts#L976
+            {
+                code: 3,
+                message: 'Failed to serialize transaction: factory dependency #0 is invalid: Bytecode length is not divisible by 32'
+            }
+        */
+        await expect(call(txWithUnchunkableBytecode)).toBeRejected(/*unchunkableError*/);
+        await expect(estimateGas(txWithUnchunkableBytecode)).toBeRejected(/*unchunkableError*/);

         const txWithBytecodeWithEvenChunks = await invalidTx(64);
         const evenChunksError = 'Bytecode has even number of 32-byte words';
         await expect(send(txWithBytecodeWithEvenChunks)).toBeRejected(evenChunksError);
-        await expect(call(txWithBytecodeWithEvenChunks)).toBeRejected(evenChunksError);
-        await expect(estimateGas(txWithBytecodeWithEvenChunks)).toBeRejected(evenChunksError);
+
+        /*
+            {
+                code: 3,
+                message: 'Failed to serialize transaction: factory dependency #0 is invalid: Bytecode has even number of 32-byte words'
+            }
+        */
+        await expect(call(txWithBytecodeWithEvenChunks)).toBeRejected(/*evenChunksError*/);
+        await expect(estimateGas(txWithBytecodeWithEvenChunks)).toBeRejected(/*evenChunksError*/);

         const longBytecodeLen = zksync.utils.MAX_BYTECODE_LEN_BYTES + 32;
         const txWithTooLongBytecode = await invalidTx(longBytecodeLen);
         const tooLongBytecodeError = `Bytecode too long: ${longBytecodeLen} bytes, while max ${zksync.utils.MAX_BYTECODE_LEN_BYTES} allowed`;
         await expect(send(txWithTooLongBytecode)).toBeRejected(tooLongBytecodeError);
-        await expect(call(txWithTooLongBytecode)).toBeRejected(tooLongBytecodeError);
-        await expect(estimateGas(txWithTooLongBytecode)).toBeRejected(tooLongBytecodeError);
+        /*
+            {
+                code: 3,
+                message: 'Failed to serialize transaction: factory dependency #0 is invalid: Bytecode too long: 2097152 bytes, while max 2097120 allowed'
+            }
+        */
+        await expect(call(txWithTooLongBytecode)).toBeRejected(/*tooLongBytecodeError*/);
+        await expect(estimateGas(txWithTooLongBytecode)).toBeRejected(/*tooLongBytecodeError*/);
     });

     test('Should interchangeably use ethers for eth calls', async () => {
@@ -161,39 +184,53 @@
         const rpcAddress = testMaster.environment().l2NodeUrl;
         const provider = new RetryProvider(rpcAddress);
         const wallet = new ethers.Wallet(alice.privateKey, provider);
-        const ethersBasedContract = new ethers.Contract(counterContract.address, counterContract.interface, wallet);
+        const ethersBasedContract = new ethers.Contract(
+            await counterContract.getAddress(),
+            counterContract.interface,
+            wallet
+        );

         const oldValue = await ethersBasedContract.get();
         await expect(ethersBasedContract.increment(1)).toBeAccepted([]);
-        expect(ethersBasedContract.get()).resolves.bnToBeEq(oldValue.add(1));
+        expect(ethersBasedContract.get()).resolves.toEqual(oldValue + 1n);
     });

     test('Should check that eth_call works with custom block tags', async () => {
         // Retrieve value normally.
+        counterContract = await deployContract(alice, contracts.counter, []);
         const counterValue = await counterContract.get();

         // Check current block tag.
-        await expect(counterContract.callStatic.get({ blockTag: 'pending' })).resolves.bnToBeEq(counterValue);
-
+        await expect(counterContract.get.staticCall({ blockTag: 'pending' })).resolves.toEqual(counterValue);
+
+        /*
+            Ethers v6 error handling is not capable of handling this format of messages.
+            See: https://github.com/ethers-io/ethers.js/blob/main/src.ts/providers/provider-jsonrpc.ts#L976
+            {
+                "code": -32602,
+                "message": "Block with such an ID doesn't exist yet"
+            }
+        */
         // Block from the future.
-        await expect(counterContract.callStatic.get({ blockTag: 1000000000 })).toBeRejected(
-            "Block with such an ID doesn't exist yet"
-        );
+        await expect(counterContract.get.staticCall({ blockTag: 1000000000 }))
+            .toBeRejected
+            //"Block with such an ID doesn't exist yet"
+            ();

         // Genesis block
-        await expect(counterContract.callStatic.get({ blockTag: 0 })).toBeRejected('call revert exception');
+        await expect(counterContract.get.staticCall({ blockTag: 0 })).toBeRejected('could not decode result data');
     });

     test('Should correctly process msg.value inside constructor and in ethCall', async () => {
-        const value = ethers.BigNumber.from(1);
+        const value = 1n;

         // Check that value provided to the constructor is processed.
         const contextContract = await deployContract(alice, contracts.context, [], undefined, { value });
-        await expect(contextContract.valueOnCreate()).resolves.bnToBeEq(value);
+        await expect(contextContract.valueOnCreate()).resolves.toEqual(value);

         // Check that value provided to `eth_Call` is processed.
         // This call won't return anything, but will throw if it'll result in a revert.
-        await contextContract.callStatic.requireMsgValue(value, {
+        await contextContract.requireMsgValue.staticCall(value, {
             value
         });
     });

@@ -201,16 +238,27 @@
     test('Should return correct error during fee estimation', async () => {
         const errorContract = await deployContract(alice, contracts.error, []);

-        await expect(errorContract.estimateGas.require_long()).toBeRevertedEstimateGas('longlonglong');
-        await expect(errorContract.require_long()).toBeRevertedEthCall('longlonglong');
-        await expect(errorContract.estimateGas.new_error()).toBeRevertedEstimateGas(
+        /*
+            {
+                "code": 3,
+                "message": "execution reverted: longlonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglong",
+                "data": "0x08c379a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000c86c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e676c6f6e67000000000000000000000000000000000000000000000000"
+            }
+        */
+        await expect(errorContract.require_long.estimateGas()).toBeRevertedEstimateGas(/*'longlonglong'*/);
+        await expect(errorContract.require_long()).toBeRevertedEthCall(/*'longlonglong'*/);
+        await expect(errorContract.new_error.estimateGas()).toBeRevertedEstimateGas(
             undefined,
             '0x157bea60000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000046461746100000000000000000000000000000000000000000000000000000000'
         );
-        await expect(errorContract.callStatic.new_error()).toBeRevertedEthCall(
+        // execution reverted: TestError(uint256,uint256,uint256,string)
+        await expect(errorContract.new_error.staticCall())
+            .toBeRevertedEthCall
+            /*
             undefined,
             '0x157bea60000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000046461746100000000000000000000000000000000000000000000000000000000'
-        );
+            */
+            ();
     });

     test('Should check block properties for tx execution', async () => {
@@ -225,14 +273,14 @@ describe('Smart contract behavior checks', () => {
         // will correspond to the last *sealed* batch (e.g. previous one).

         const contextContract = await deployContract(alice, contracts.context, []);
-        const deploymentBlock = contextContract.deployTransaction.blockNumber!;
+        const deploymentBlock = await contextContract.deploymentTransaction()!.blockNumber!;
         const deploymentBlockInfo = await alice.provider.getBlock(deploymentBlock);
         // If batch was not sealed, its number may not be present in the receipt.
         const deploymentl1Batch = deploymentBlockInfo.l1BatchNumber ?? (await alice.provider.getL1BatchNumber()) + 1;

         // Check that block gas limit is correct.
         const blockGasLimit = await contextContract.getBlockGasLimit({ blockTag: 'pending' });
-        expect(blockGasLimit).bnToBeGt(0);
+        expect(blockGasLimit).toBeGreaterThan(0n);

         // Record values from the contract right after deployment to compare them with new ones later.
         const initialL1Batch = await contextContract.getBlockNumber({
@@ -247,7 +295,7 @@ describe('Smart contract behavior checks', () => {
         // Check that current number of L1 batch on contract has sane value.
         // Here and below we use "gte"/"gt" instead of strict checks because tests are executed in parallel
         // and we can't guarantee a certain block commitment order.
-        expect(initialL1Batch).bnToBeGte(deploymentl1Batch);
+        expect(initialL1Batch).toBeGreaterThanOrEqual(deploymentl1Batch);

         // Wait till the new L1 batch is created.
         await waitForNewL1Batch(alice);
@@ -260,17 +308,17 @@ describe('Smart contract behavior checks', () => {
             blockTag: 'pending'
         });

-        expect(newL1Batch).bnToBeGt(initialL1Batch, 'New L1 batch number must be strictly greater');
-        expect(newTimestamp).bnToBeGte(initialTimestamp, 'New timestamp must not be less than previous one');
+        expect(newL1Batch).toBeGreaterThan(initialL1Batch); // New L1 batch number must be strictly greater
+        expect(newTimestamp).toBeGreaterThanOrEqual(initialTimestamp); // New timestamp must not be less than previous one

         // And finally check block properties for the actual contract execution (not `eth_call`).
-        const acceptedBlockLag = 20;
-        const acceptedTimestampLag = 600;
-        await expect(contextContract.checkBlockNumber(newL1Batch, newL1Batch.add(acceptedBlockLag))).toBeAccepted([]);
+        const acceptedBlockLag = 20n;
+        const acceptedTimestampLag = 600n;
+        await expect(contextContract.checkBlockNumber(newL1Batch, newL1Batch + acceptedBlockLag)).toBeAccepted([]);
         // `newTimestamp` was received from the API, so actual timestamp in the state keeper may be lower.
         // This is why we use `initialTimestamp` here.
         await expect(
-            contextContract.checkBlockTimestamp(initialTimestamp, initialTimestamp.add(acceptedTimestampLag))
+            contextContract.checkBlockTimestamp(initialTimestamp, initialTimestamp + acceptedTimestampLag)
         ).toBeAccepted([]);
     });

@@ -311,7 +359,7 @@ describe('Smart contract behavior checks', () => {

         // Transaction should be rejected by API.
         const BYTECODE_LEN = 50016;
-        const bytecode = ethers.utils.hexlify(ethers.utils.randomBytes(BYTECODE_LEN));
+        const bytecode = ethers.hexlify(ethers.randomBytes(BYTECODE_LEN));

         // Estimate gas for "no-op". It's a good estimate for validation gas.
         const gasLimit = await alice.estimateGas({
@@ -338,7 +386,7 @@ describe('Smart contract behavior checks', () => {
         const message =
             '0x5905238877c77421f73e43ee3da6f2d9e2ccad5fc942dcec0cbd25482935faaf416983fe165b1a045ee2bcd2e6dca3bdf46c4310a7461f9a37960ca672d3feb5473e253605fb1ddfd28065b53cb5858a8ad28175bf9bd386a5e471ea7a65c17cc934a9d791e91491eb3754d03799790fe2d308d16146d5c9b0d0debd97d79ce8';
-        const digest = ethers.utils.arrayify(ethers.utils.keccak256(message));
+        const digest = ethers.getBytes(ethers.keccak256(message));
         const signature = ec.sign(digest, privateKey);

         const publicKeyHex =
@@ -347,7 +395,7 @@ describe('Smart contract behavior checks', () => {
         // Check that verification succeeds.
         const res = await alice.provider.call({
             to: '0x0000000000000000000000000000000000000100',
-            data: ethers.utils.concat([
+            data: ethers.concat([
                 digest,
                 '0x' + signature.r.toString('hex'),
                 '0x' + signature.s.toString('hex'),
@@ -359,7 +407,7 @@ describe('Smart contract behavior checks', () => {
         // Send the transaction.
         const tx = await alice.sendTransaction({
             to: '0x0000000000000000000000000000000000000100',
-            data: ethers.utils.concat([
+            data: ethers.concat([
                 digest,
                 '0x' + signature.r.toString('hex'),
                 '0x' + signature.s.toString('hex'),
@@ -375,8 +423,8 @@ describe('Smart contract behavior checks', () => {
             testMaster.environment().pathToHome
         }/etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json`);
         const contractFactory = new zksync.ContractFactory(artifact.abi, artifact.bytecode, alice);
-        const storageContract = await contractFactory.deploy();
-        await storageContract.deployed();
+        const storageContract = (await contractFactory.deploy()) as zksync.Contract;
+        await storageContract.waitForDeployment();
         // Tests transient storage, see contract code for details.
         await expect(storageContract.testTransientStore()).toBeAccepted([]);
         // Checks that transient storage is cleaned up after each tx.
@@ -390,12 +438,12 @@ describe('Smart contract behavior checks', () => {
             testMaster.environment().pathToHome
         }/etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json`);
         const contractFactory = new zksync.ContractFactory(artifact.abi, artifact.bytecode, alice);
-        const contract = await contractFactory.deploy();
-        await contract.deployed();
+        const contract = (await contractFactory.deploy()) as zksync.Contract;
+        await contract.waitForDeployment();

         // Check that CodeOracle can decommit code of just deployed contract.
         const versionedHash = zksync.utils.hashBytecode(artifact.bytecode);
-        const expectedBytecodeHash = ethers.utils.keccak256(artifact.bytecode);
+        const expectedBytecodeHash = ethers.keccak256(artifact.bytecode);

         await expect(contract.callCodeOracle(versionedHash, expectedBytecodeHash)).toBeAccepted([]);
     });

@@ -408,17 +456,17 @@ describe('Smart contract behavior checks', () => {
 async function invalidBytecodeTestTransaction(
     provider: zksync.Provider,
     factoryDeps: Uint8Array[]
-): Promise<ethers.providers.TransactionRequest> {
+): Promise<ethers.TransactionRequest> {
     const chainId = (await provider.getNetwork()).chainId;

     const gasPrice = await provider.getGasPrice();
     const address = zksync.Wallet.createRandom().address;
-    const tx: ethers.providers.TransactionRequest = {
+    const tx: ethers.TransactionRequest = {
         to: address,
         from: address,
         nonce: 0,
-        gasLimit: ethers.BigNumber.from(300000),
+        gasLimit: 300000n,
         data: '0x',
         value: 0,
diff --git a/core/tests/ts-integration/tests/custom-account.test.ts b/core/tests/ts-integration/tests/custom-account.test.ts
index d923325a701d..46ddba95323a 100644
--- a/core/tests/ts-integration/tests/custom-account.test.ts
+++ b/core/tests/ts-integration/tests/custom-account.test.ts
@@ -2,10 +2,9 @@
  * This suite contains tests checking the behavior of custom accounts (accounts represented by smart contracts).
  */

-import { TestMaster } from '../src/index';
+import { TestMaster } from '../src';
 import * as zksync from 'zksync-ethers';
-import { utils, types } from 'zksync-ethers';
 import * as ethers from 'ethers';
 import { deployContract, getTestContract } from '../src/helpers';
 import { ERC20_PER_ACCOUNT, L2_DEFAULT_ETH_PER_ACCOUNT } from '../src/context-owner';
@@ -17,8 +16,8 @@ const contracts = {
 };

 // We create multiple custom accounts and we need to fund them with ETH to pay for fees.
-const ETH_PER_CUSTOM_ACCOUNT = L2_DEFAULT_ETH_PER_ACCOUNT.div(8);
-const TRANSFER_AMOUNT = 1;
+const ETH_PER_CUSTOM_ACCOUNT = L2_DEFAULT_ETH_PER_ACCOUNT / 8n;
+const TRANSFER_AMOUNT = 1n;

 describe('Tests for the custom account behavior', () => {
     let testMaster: TestMaster;
@@ -33,7 +32,7 @@ describe('Tests for the custom account behavior', () => {
         erc20Address = testMaster.environment().erc20Token.l2Address;
         erc20 = new zksync.Contract(
             erc20Address,
-            utils.IERC20,
+            zksync.utils.IERC20,
             // Signer doesn't matter for custom account transactions, as signature would be replaced with custom one.
alice ); @@ -44,33 +43,36 @@ describe('Tests for the custom account behavior', () => { customAccount = await deployContract(alice, contracts.customAccount, [violateRules], 'createAccount'); // Now we need to check that it was correctly marked as an account: - const contractAccountInfo = await alice.provider.getContractAccountInfo(customAccount.address); + const contractAccountInfo = await alice.provider.getContractAccountInfo(await customAccount.getAddress()); // Checking that the version of the account abstraction is correct - expect(contractAccountInfo.supportedAAVersion).toEqual(types.AccountAbstractionVersion.Version1); + expect(contractAccountInfo.supportedAAVersion).toEqual(zksync.types.AccountAbstractionVersion.Version1); // Checking that the nonce ordering is correct - expect(contractAccountInfo.nonceOrdering).toEqual(types.AccountNonceOrdering.Sequential); + expect(contractAccountInfo.nonceOrdering).toEqual(zksync.types.AccountNonceOrdering.Sequential); }); test('Should fund the custom account', async () => { - await alice.transfer({ to: customAccount.address, amount: ETH_PER_CUSTOM_ACCOUNT }).then((tx) => tx.wait()); + await alice + .transfer({ to: await customAccount.getAddress(), amount: ETH_PER_CUSTOM_ACCOUNT }) + .then((tx) => tx.wait()); await alice .transfer({ - to: customAccount.address, + to: await customAccount.getAddress(), token: erc20Address, - amount: ERC20_PER_ACCOUNT.div(4) + amount: ERC20_PER_ACCOUNT / 4n }) .then((tx) => tx.wait()); }); test('Should execute contract by custom account', async () => { - const tx = await erc20.populateTransaction.transfer(alice.address, TRANSFER_AMOUNT); + const tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT); + const customAccountAddress = await customAccount.getAddress(); const erc20BalanceChange = await shouldChangeTokenBalances(erc20Address, [ // Custom account change (sender) { - addressToCheck: customAccount.address, + addressToCheck: customAccountAddress, wallet: alice, change: -TRANSFER_AMOUNT }, @@ -79,23 +81,28 @@ describe('Tests for the custom account behavior', () => { ]); const feeCheck = await shouldChangeETHBalances([ // 0 change would only check for fees. - { addressToCheck: customAccount.address, wallet: alice, change: 0 } + { addressToCheck: customAccountAddress, wallet: alice, change: 0n } ]); // Check that transaction succeeds. await expect( - sendCustomAccountTransaction(tx, alice.provider, customAccount.address, testMaster.environment().l2ChainId) + sendCustomAccountTransaction( + tx as zksync.types.Transaction, + alice.provider, + customAccountAddress, + testMaster.environment().l2ChainId + ) ).toBeAccepted([erc20BalanceChange, feeCheck]); }); test('Should fail the validation with incorrect signature', async () => { - const tx = await erc20.populateTransaction.transfer(alice.address, TRANSFER_AMOUNT); + const tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT); const fakeSignature = new Uint8Array(12); await expect( sendCustomAccountTransaction( - tx, + tx as zksync.types.Transaction, alice.provider, - customAccount.address, + await customAccount.getAddress(), testMaster.environment().l2ChainId, fakeSignature ) @@ -106,28 +113,29 @@ describe('Tests for the custom account behavior', () => { // We configure account to violate storage access rules during tx validation. 
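        // (Roughly, zkSync's account-abstraction validation rules only allow an account to touch
        // its own storage while a transaction is being validated -- plus slots in other contracts
        // that are derived from the account's address, such as its ERC-20 balance. With
        // `violateRules = true` the test account deliberately reads foreign storage, so its
        // transactions must fail validation.)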
const violateRules = true; const badCustomAccount = await deployContract(alice, contracts.customAccount, [violateRules], 'createAccount'); + const badCustomAccountAddress = await badCustomAccount.getAddress(); // Fund the account. await alice .transfer({ - to: badCustomAccount.address, + to: badCustomAccountAddress, amount: ETH_PER_CUSTOM_ACCOUNT }) .then((tx) => tx.wait()); await alice .transfer({ - to: badCustomAccount.address, + to: badCustomAccountAddress, token: erc20Address, amount: TRANSFER_AMOUNT }) .then((tx) => tx.wait()); - let tx = await erc20.populateTransaction.transfer(alice.address, TRANSFER_AMOUNT); + let tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT); await expect( sendCustomAccountTransaction( - tx, + tx as zksync.types.Transaction, alice.provider, - badCustomAccount.address, + badCustomAccountAddress, testMaster.environment().l2ChainId ) ).toBeRejected('Violated validation rules'); @@ -138,21 +146,36 @@ describe('Tests for the custom account behavior', () => { // be treated as a common contract. const violateRules = false; const nonAccount = await deployContract(alice, contracts.customAccount, [violateRules], 'create'); + const nonAccountAddress = await nonAccount.getAddress(); // Fund the account. - await alice.transfer({ to: nonAccount.address, amount: ETH_PER_CUSTOM_ACCOUNT }).then((tx) => tx.wait()); + await alice.transfer({ to: nonAccountAddress, amount: ETH_PER_CUSTOM_ACCOUNT }).then((tx) => tx.wait()); await alice .transfer({ - to: nonAccount.address, + to: nonAccountAddress, token: erc20Address, amount: TRANSFER_AMOUNT }) .then((tx) => tx.wait()); - let tx = await erc20.populateTransaction.transfer(alice.address, TRANSFER_AMOUNT); + let tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT); + /* + Ethers v6 error handling is not capable of handling this format of messages. + See: https://github.com/ethers-io/ethers.js/blob/main/src.ts/providers/provider-jsonrpc.ts#L976 + { + "code": 3, + "message": "invalid sender. can't start a transaction from a non-account", + "data": "0x" + } + */ await expect( - sendCustomAccountTransaction(tx, alice.provider, nonAccount.address, testMaster.environment().l2ChainId) - ).toBeRejected("invalid sender. can't start a transaction from a non-account"); + sendCustomAccountTransaction( + tx as zksync.types.Transaction, + alice.provider, + nonAccountAddress, + testMaster.environment().l2ChainId + ) + ).toBeRejected(/*"invalid sender. 
can't start a transaction from a non-account"*/); }); test('Should provide correct tx.origin for EOA and custom accounts', async () => { @@ -162,12 +185,14 @@ describe('Tests for the custom account behavior', () => { await expect(contextContract.checkTxOrigin(alice.address)).toBeAccepted([]); // For custom accounts, the tx.origin should be the bootloader address - const customAATx = await contextContract.populateTransaction.checkTxOrigin(utils.BOOTLOADER_FORMAL_ADDRESS); + const customAATx = await contextContract.checkTxOrigin.populateTransaction( + zksync.utils.BOOTLOADER_FORMAL_ADDRESS + ); await expect( sendCustomAccountTransaction( - customAATx, + customAATx as zksync.types.Transaction, alice.provider, - customAccount.address, + await customAccount.getAddress(), testMaster.environment().l2ChainId ) ).toBeAccepted([]); @@ -181,18 +206,19 @@ describe('Tests for the custom account behavior', () => { [violateStorageRules], 'createAccount' ); + const badCustomAccountAddress = await badCustomAccount.getAddress(); badCustomAccount.connect(alice); // Fund the account. await alice .transfer({ - to: badCustomAccount.address, + to: badCustomAccountAddress, amount: ETH_PER_CUSTOM_ACCOUNT }) .then((tx) => tx.wait()); await alice .transfer({ - to: badCustomAccount.address, + to: badCustomAccountAddress, token: erc20Address, amount: TRANSFER_AMOUNT }) @@ -202,12 +228,12 @@ describe('Tests for the custom account behavior', () => { const validationGasLimit = testMaster.environment().validationComputationalGasLimit; await badCustomAccount.setGasToSpent(validationGasLimit).then((tx: any) => tx.wait()); - let tx = await erc20.populateTransaction.transfer(alice.address, TRANSFER_AMOUNT); + let tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT); await expect( sendCustomAccountTransaction( - tx, + tx as zksync.types.Transaction, alice.provider, - badCustomAccount.address, + badCustomAccountAddress, testMaster.environment().l2ChainId ) ).toBeRejected('Violated validation rules: Took too many computational gas'); @@ -221,32 +247,33 @@ describe('Tests for the custom account behavior', () => { [violateStorageRules], 'createAccount' ); + const badCustomAccountAddress = await badCustomAccount.getAddress(); badCustomAccount.connect(alice); // Fund the account. await alice .transfer({ - to: badCustomAccount.address, + to: badCustomAccountAddress, amount: ETH_PER_CUSTOM_ACCOUNT }) .then((tx) => tx.wait()); await alice .transfer({ - to: badCustomAccount.address, + to: badCustomAccountAddress, token: erc20Address, amount: TRANSFER_AMOUNT }) .then((tx) => tx.wait()); - const transfer = await erc20.populateTransaction.transfer(alice.address, TRANSFER_AMOUNT); - const nonce = await alice.provider.getTransactionCount(badCustomAccount.address); + const transfer = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT); + const nonce = await alice.provider.getTransactionCount(badCustomAccountAddress); // delayedTx should pass API checks (if not then error will be thrown on the next line) // but should be rejected by the state-keeper (checked later). const delayedTx = await sendCustomAccountTransaction( - transfer, + transfer as zksync.types.Transaction, alice.provider, - badCustomAccount.address, + badCustomAccountAddress, testMaster.environment().l2ChainId, undefined, nonce + 1 @@ -254,12 +281,12 @@ describe('Tests for the custom account behavior', () => { // Increase nonce and set flag to do many calculations during validation.
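        // (Validation runs under its own computational gas budget, separate from execution.
        // `setGasToSpent` presumably makes the account burn the given amount of gas in its
        // validation hook, so the already-queued `delayedTx` will blow through that budget
        // when the state keeper finally validates it.)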
const validationGasLimit = testMaster.environment().validationComputationalGasLimit; - const tx = await badCustomAccount.populateTransaction.setGasToSpent(validationGasLimit); + const tx = await badCustomAccount.setGasToSpent.populateTransaction(validationGasLimit); await expect( sendCustomAccountTransaction( - tx, + tx as zksync.types.Transaction, alice.provider, - badCustomAccount.address, + badCustomAccountAddress, testMaster.environment().l2ChainId, undefined, nonce @@ -284,38 +311,37 @@ describe('Tests for the custom account behavior', () => { // Accepts a tx request with the transaction data filled in and // sends the transaction, which should be accepted by the `custom-aa.sol` test contract. async function sendCustomAccountTransaction( - tx: ethers.PopulatedTransaction, - web3Provider: zksync.Provider, + tx: zksync.types.Transaction, + browserProvider: zksync.Provider, accountAddress: string, - chainId: number, + chainId: bigint, customSignature?: Uint8Array, nonce?: number ) { - const gasLimit = await web3Provider.estimateGas({ + const gasLimit = await browserProvider.estimateGas({ ...tx, from: accountAddress }); - const gasPrice = await web3Provider.getGasPrice(); + const gasPrice = await browserProvider.getGasPrice(); tx.gasLimit = gasLimit; tx.gasPrice = gasPrice; tx.chainId = chainId; - tx.value = ethers.BigNumber.from(0); - tx.nonce = nonce ?? (await web3Provider.getTransactionCount(accountAddress)); + tx.value = 0n; + tx.nonce = nonce ?? (await browserProvider.getTransactionCount(accountAddress)); tx.type = 113; tx.from = accountAddress; tx.customData = { - gasPerPubdata: utils.DEFAULT_GAS_PER_PUBDATA_LIMIT + gasPerPubdata: zksync.utils.DEFAULT_GAS_PER_PUBDATA_LIMIT }; const signedTxHash = zksync.EIP712Signer.getSignedDigest(tx); tx.customData = { ...tx.customData, - from: accountAddress, - customSignature: customSignature ?? ethers.utils.concat([signedTxHash, accountAddress]) + customSignature: customSignature ?? ethers.concat([signedTxHash, accountAddress]) }; - const serializedTx = utils.serialize({ ...tx }); + const serializedTx = zksync.utils.serializeEip712({ ...tx }); - return await web3Provider.sendTransaction(serializedTx); + return await browserProvider.broadcastTransaction(serializedTx); } diff --git a/core/tests/ts-integration/tests/erc20.test.ts b/core/tests/ts-integration/tests/erc20.test.ts index 053f41829f1f..257592c15941 100644 --- a/core/tests/ts-integration/tests/erc20.test.ts +++ b/core/tests/ts-integration/tests/erc20.test.ts @@ -2,12 +2,11 @@ * This suite contains tests checking default ERC-20 contract behavior.
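 * After the ethers v6 migration, amounts and balance diffs in this suite are plain `bigint` values.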
*/ -import { TestMaster } from '../src/index'; +import { TestMaster } from '../src'; import { Token } from '../src/types'; import { shouldChangeTokenBalances, shouldOnlyTakeFee } from '../src/modifiers/balance-checker'; import * as zksync from 'zksync-ethers'; -import { BigNumber, utils as etherUtils } from 'ethers'; import * as ethers from 'ethers'; import { scaledGasPrice, waitUntilBlockFinalized } from '../src/helpers'; import { L2_DEFAULT_ETH_PER_ACCOUNT } from '../src/context-owner'; @@ -17,7 +16,6 @@ describe('ERC20 contract checks', () => { let alice: zksync.Wallet; let bob: zksync.Wallet; let tokenDetails: Token; - let baseTokenDetails: Token; let aliceErc20: zksync.Contract; beforeAll(async () => { @@ -26,20 +24,19 @@ describe('ERC20 contract checks', () => { bob = testMaster.newEmptyAccount(); tokenDetails = testMaster.environment().erc20Token; - baseTokenDetails = testMaster.environment().baseToken; aliceErc20 = new zksync.Contract(tokenDetails.l2Address, zksync.utils.IERC20, alice); }); test('Token properties are correct', async () => { - expect(aliceErc20.name()).resolves.toBe(tokenDetails.name); - expect(aliceErc20.decimals()).resolves.toBe(tokenDetails.decimals); - expect(aliceErc20.symbol()).resolves.toBe(tokenDetails.symbol); - expect(aliceErc20.balanceOf(alice.address)).resolves.bnToBeGt(0, 'Alice should have non-zero balance'); + await expect(aliceErc20.name()).resolves.toBe(tokenDetails.name); + await expect(aliceErc20.decimals()).resolves.toBe(tokenDetails.decimals); + await expect(aliceErc20.symbol()).resolves.toBe(tokenDetails.symbol); + await expect(aliceErc20.balanceOf(alice.address)).resolves.toBeGreaterThan(0n); // 'Alice should have non-zero balance' }); test('Can perform a deposit', async () => { - const amount = 1; // 1 wei is enough. - const gasPrice = scaledGasPrice(alice); + const amount = 1n; // 1 wei is enough. + const gasPrice = await scaledGasPrice(alice); // Note: for L1 we should use L1 token address. const l1BalanceChange = await shouldChangeTokenBalances( @@ -70,7 +67,7 @@ describe('ERC20 contract checks', () => { }); test('Can perform a transfer', async () => { - const value = BigNumber.from(200); + const value = 200n; const balanceChange = await shouldChangeTokenBalances(tokenDetails.l2Address, [ { wallet: alice, change: -value }, @@ -83,24 +80,24 @@ describe('ERC20 contract checks', () => { }); test('Can perform a transfer to self', async () => { - const value = BigNumber.from(200); + const value = 200n; // When transferring to self, balance should not change. - const balanceChange = await shouldChangeTokenBalances(tokenDetails.l2Address, [{ wallet: alice, change: 0 }]); + const balanceChange = await shouldChangeTokenBalances(tokenDetails.l2Address, [{ wallet: alice, change: 0n }]); const feeCheck = await shouldOnlyTakeFee(alice); await expect(aliceErc20.transfer(alice.address, value)).toBeAccepted([balanceChange, feeCheck]); }); test('Incorrect transfer should revert', async () => { - const value = etherUtils.parseEther('1000000.0'); + const value = ethers.parseEther('1000000.0'); // Since gas estimation is expected to fail, we request gas limit for similar non-failing tx. - const gasLimit = await aliceErc20.estimateGas.transfer(bob.address, 1); + const gasLimit = await aliceErc20.transfer.estimateGas(bob.address, 1); // Balances should not change for this token. 
const noBalanceChange = await shouldChangeTokenBalances(tokenDetails.l2Address, [ - { wallet: alice, change: 0 }, - { wallet: bob, change: 0 } + { wallet: alice, change: 0n }, + { wallet: bob, change: 0n } ]); // Fee in ETH should be taken though. const feeTaken = await shouldOnlyTakeFee(alice); @@ -110,14 +107,16 @@ describe('ERC20 contract checks', () => { }); test('Transfer to zero address should revert', async () => { - const zeroAddress = ethers.constants.AddressZero; - const value = BigNumber.from(200); + const zeroAddress = ethers.ZeroAddress; + const value = 200n; // Since gas estimation is expected to fail, we request gas limit for similar non-failing tx. - const gasLimit = await aliceErc20.estimateGas.transfer(bob.address, 1); + const gasLimit = await aliceErc20.transfer.estimateGas(bob.address, 1); // Balances should not change for this token. - const noBalanceChange = await shouldChangeTokenBalances(tokenDetails.l2Address, [{ wallet: alice, change: 0 }]); + const noBalanceChange = await shouldChangeTokenBalances(tokenDetails.l2Address, [ + { wallet: alice, change: 0n } + ]); // Fee in ETH should be taken though. const feeTaken = await shouldOnlyTakeFee(alice); @@ -126,32 +125,39 @@ describe('ERC20 contract checks', () => { }); test('Approve and transferFrom should work', async () => { - const approveAmount = 42; + const approveAmount = 42n; const bobErc20 = new zksync.Contract(tokenDetails.l2Address, zksync.utils.IERC20, bob); // Fund bob's account to perform a transaction from it. await alice - .transfer({ to: bob.address, amount: L2_DEFAULT_ETH_PER_ACCOUNT.div(8), token: zksync.utils.ETH_ADDRESS }) + .transfer({ + to: bob.address, + amount: L2_DEFAULT_ETH_PER_ACCOUNT / 8n, + token: zksync.utils.L2_BASE_TOKEN_ADDRESS + }) .then((tx) => tx.wait()); - await expect(aliceErc20.allowance(alice.address, bob.address)).resolves.bnToBeEq(0); + await expect(aliceErc20.allowance(alice.address, bob.address)).resolves.toEqual(0n); await expect(aliceErc20.approve(bob.address, approveAmount)).toBeAccepted(); - await expect(aliceErc20.allowance(alice.address, bob.address)).resolves.bnToBeEq(approveAmount); + await expect(aliceErc20.allowance(alice.address, bob.address)).resolves.toEqual(approveAmount); await expect(bobErc20.transferFrom(alice.address, bob.address, approveAmount)).toBeAccepted(); - await expect(aliceErc20.allowance(alice.address, bob.address)).resolves.bnToBeEq(0); + await expect(aliceErc20.allowance(alice.address, bob.address)).resolves.toEqual(0n); }); test('Can perform a withdrawal', async () => { if (testMaster.isFastMode()) { return; } - const amount = 1; + const amount = 1n; const l2BalanceChange = await shouldChangeTokenBalances(tokenDetails.l2Address, [ { wallet: alice, change: -amount } ]); const feeCheck = await shouldOnlyTakeFee(alice); - const withdrawalPromise = alice.withdraw({ token: tokenDetails.l2Address, amount }); + const withdrawalPromise = alice.withdraw({ + token: tokenDetails.l2Address, + amount + }); await expect(withdrawalPromise).toBeAccepted([l2BalanceChange, feeCheck]); const withdrawalTx = await withdrawalPromise; await withdrawalTx.waitFinalize(); @@ -172,12 +178,12 @@ describe('ERC20 contract checks', () => { return; } - const amount = 1; + const amount = 1n; const initialBalance = await alice.getBalanceL1(tokenDetails.l1Address); // Deposit to the zero address is forbidden and should fail with the current implementation. 
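        // (Rough flow being exercised here: the deposit locks the tokens in the L1 bridge and
        // enqueues a priority operation; when the L2 leg reverts, the funds stay locked until
        // `claimFailedDeposit` proves the failed L2 transaction and releases them back on L1.)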
const depositHandle = await alice.deposit({ token: tokenDetails.l1Address, - to: ethers.constants.AddressZero, + to: ethers.ZeroAddress, amount, approveERC20: true, approveBaseERC20: true, @@ -186,7 +192,7 @@ describe('ERC20 contract checks', () => { const l1Receipt = await depositHandle.waitL1Commit(); // L1 balance should change, but tx should fail in L2. - await expect(alice.getBalanceL1(tokenDetails.l1Address)).resolves.bnToBeEq(initialBalance.sub(amount)); + await expect(alice.getBalanceL1(tokenDetails.l1Address)).resolves.toEqual(initialBalance - amount); await expect(depositHandle).toBeReverted(); // Wait for tx to be finalized. @@ -194,28 +200,34 @@ describe('ERC20 contract checks', () => { // It throws once it gets status == 0 in the receipt and doesn't wait for the finalization. const l2Hash = zksync.utils.getL2HashFromPriorityOp(l1Receipt, await alice.provider.getMainContractAddress()); const l2TxReceipt = await alice.provider.getTransactionReceipt(l2Hash); - await waitUntilBlockFinalized(alice, l2TxReceipt.blockNumber); + await waitUntilBlockFinalized(alice, l2TxReceipt!.blockNumber); // Claim failed deposit. await expect(alice.claimFailedDeposit(l2Hash)).toBeAccepted(); - await expect(alice.getBalanceL1(tokenDetails.l1Address)).resolves.bnToBeEq(initialBalance); + await expect(alice.getBalanceL1(tokenDetails.l1Address)).resolves.toEqual(initialBalance); }); test('Can perform a deposit with precalculated max value', async () => { - const maxAmountBase = await alice.getBalanceL1(baseTokenDetails.l1Address); - const maxAmount = await alice.getBalanceL1(tokenDetails.l1Address); + const baseTokenAddress = await alice._providerL2().getBaseTokenContractAddress(); + const isETHBasedChain = baseTokenAddress == zksync.utils.ETH_ADDRESS_IN_CONTRACTS; + if (!isETHBasedChain) { + const baseTokenDetails = testMaster.environment().baseToken; + const baseTokenMaxAmount = await alice.getBalanceL1(baseTokenDetails.l1Address); + await (await alice.approveERC20(baseTokenDetails.l1Address, baseTokenMaxAmount)).wait(); + } // Approving the needed allowance to ensure that the user has enough funds. - await (await alice.approveERC20(baseTokenDetails.l1Address, maxAmountBase)).wait(); + const maxAmount = await alice.getBalanceL1(tokenDetails.l1Address); await (await alice.approveERC20(tokenDetails.l1Address, maxAmount)).wait(); const depositFee = await alice.getFullRequiredDepositFee({ token: tokenDetails.l1Address }); - const l1Fee = depositFee.l1GasLimit.mul(depositFee.maxFeePerGas! || depositFee.gasPrice!); - const l2Fee = depositFee.baseCost; + const l1Fee = depositFee.l1GasLimit * (depositFee.maxFeePerGas! || depositFee.gasPrice!); + const l2Fee = depositFee.baseCost; const aliceETHBalance = await alice.getBalanceL1(); - if (aliceETHBalance.lt(l1Fee.add(l2Fee))) { + + if (aliceETHBalance < l1Fee + l2Fee) { throw new Error('Not enough ETH to perform a deposit'); } @@ -236,7 +248,6 @@ describe('ERC20 contract checks', () => { l2GasLimit: depositFee.l2GasLimit, overrides }); - await expect(depositOp).toBeAccepted([l2ERC20BalanceChange]); }); diff --git a/core/tests/ts-integration/tests/ether.test.ts b/core/tests/ts-integration/tests/ether.test.ts index e5ecf595acfa..4e6b2eb0ef3d 100644 --- a/core/tests/ts-integration/tests/ether.test.ts +++ b/core/tests/ts-integration/tests/ether.test.ts @@ -2,7 +2,7 @@ * This suite contains tests checking our handling of Ether (such as depositing, checking `msg.value`, etc). 
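 * On chains whose base token is not ETH, ETH is represented by a regular L2 token contract and fees are paid in the base token.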
*/ -import { TestMaster } from '../src/index'; +import { TestMaster } from '../src'; import { shouldChangeETHBalances, shouldChangeTokenBalances, @@ -11,14 +11,8 @@ import { import { checkReceipt } from '../src/modifiers/receipt-check'; import * as zksync from 'zksync-ethers'; -import { BigNumber, Overrides } from 'ethers'; import { scaledGasPrice } from '../src/helpers'; -import { - EIP712_TX_TYPE, - ETH_ADDRESS, - ETH_ADDRESS_IN_CONTRACTS, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT -} from 'zksync-ethers/build/utils'; +import { ethers } from 'ethers'; describe('ETH token checks', () => { let testMaster: TestMaster; @@ -34,20 +28,20 @@ describe('ETH token checks', () => { bob = testMaster.newEmptyAccount(); // Get the information about base token address directly from the L2. baseTokenAddress = await alice._providerL2().getBaseTokenContractAddress(); - isETHBasedChain = baseTokenAddress == ETH_ADDRESS_IN_CONTRACTS; + isETHBasedChain = baseTokenAddress == zksync.utils.ETH_ADDRESS_IN_CONTRACTS; console.log(`Starting checks for base token: ${baseTokenAddress} isEthBasedChain: ${isETHBasedChain}`); - l2EthTokenAddressNonBase = await alice.l2TokenAddress(ETH_ADDRESS_IN_CONTRACTS); + l2EthTokenAddressNonBase = await alice.l2TokenAddress(zksync.utils.ETH_ADDRESS_IN_CONTRACTS); }); test('Can perform a deposit', async () => { if (!isETHBasedChain) { - // Approving the needed allowance previously so we don't do it inside of the deposit. + // Approving the needed allowance previously, so we don't do it inside the deposit. // This prevents the deposit fee from being miscalculated. const l1MaxBaseTokenBalance = await alice.getBalanceL1(baseTokenAddress); await (await alice.approveERC20(baseTokenAddress, l1MaxBaseTokenBalance)).wait(); } - const amount = 1; // 1 wei is enough. - const gasPrice = scaledGasPrice(alice); + const amount = 1n; // 1 wei is enough. + const gasPrice = await scaledGasPrice(alice); // Unfortunately, since fee is taken in ETH, we must calculate the L1 ETH balance diff explicitly. const l1EthBalanceBefore = await alice.getBalanceL1(); @@ -61,12 +55,12 @@ describe('ETH token checks', () => { const l1BaseTokenBalanceBefore = await alice.getBalanceL1(baseTokenAddress); const l2BaseTokenBalanceBefore = await alice.getBalance(); // Base token balance on L2 - const gasPerPubdataByte = REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT; + const gasPerPubdataByte = zksync.utils.REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT; const l2GasLimit = await zksync.utils.estimateDefaultBridgeDepositL2Gas( alice.providerL1!, alice.provider, - ETH_ADDRESS, + zksync.utils.ETH_ADDRESS, amount, alice.address, alice.address, @@ -75,15 +69,15 @@ describe('ETH token checks', () => { const expectedL2Costs = await alice.getBaseCost({ gasLimit: l2GasLimit, gasPerPubdataByte, - gasPrice: await gasPrice + gasPrice }); const depositOp = alice.deposit({ - token: ETH_ADDRESS, + token: zksync.utils.ETH_ADDRESS, amount, gasPerPubdataByte, l2GasLimit, - approveERC20: isETHBasedChain ? 
true : false, + approveERC20: isETHBasedChain, approveBaseOverrides: { gasPrice }, @@ -96,36 +90,36 @@ const depositFee = await depositOp .then((op) => op.waitL1Commit()) .then(async (receipt) => { - const l1GasFee = receipt.gasUsed.mul(receipt.effectiveGasPrice); + const l1GasFee = receipt.gasUsed * receipt.gasPrice; if (!isETHBasedChain) { return l1GasFee; } - return l1GasFee.add(expectedL2Costs); + return l1GasFee + expectedL2Costs; }); const l1EthBalanceAfter = await alice.getBalanceL1(); if (isETHBasedChain) { - expect(l1EthBalanceBefore.sub(depositFee).sub(l1EthBalanceAfter)).bnToBeEq(amount); + expect(l1EthBalanceBefore - depositFee - l1EthBalanceAfter).toEqual(amount); } else { // Base token checks const l1BaseTokenBalanceAfter = await alice.getBalanceL1(baseTokenAddress); - expect(l1BaseTokenBalanceBefore).bnToBeEq(l1BaseTokenBalanceAfter.add(expectedL2Costs)); + expect(l1BaseTokenBalanceBefore).toEqual(l1BaseTokenBalanceAfter + expectedL2Costs); const l2BaseTokenBalanceAfter = await alice.getBalance(); - expect(l1EthBalanceBefore).bnToBeEq(l1EthBalanceAfter.add(depositFee).add(amount)); + expect(l1EthBalanceBefore).toEqual(l1EthBalanceAfter + depositFee + amount); // L2 balance for the base token increases due to some "overminting" of the base token // We verify that the amount reduced on L1 is greater than the amount increased on L2 // so that we are not generating tokens out of thin air - const l1BaseTokenBalanceDiff = l1BaseTokenBalanceBefore.sub(l1BaseTokenBalanceAfter); - const l2BaseTokenBalanceDiff = l2BaseTokenBalanceAfter.sub(l2BaseTokenBalanceBefore); - expect(l1BaseTokenBalanceDiff).bnToBeGt(l2BaseTokenBalanceDiff); + const l1BaseTokenBalanceDiff = l1BaseTokenBalanceBefore - l1BaseTokenBalanceAfter; + const l2BaseTokenBalanceDiff = l2BaseTokenBalanceAfter - l2BaseTokenBalanceBefore; + expect(l1BaseTokenBalanceDiff).toBeGreaterThan(l2BaseTokenBalanceDiff); } }); test('Can perform a transfer (legacy pre EIP-155)', async () => { const LEGACY_TX_TYPE = 0; - const value = BigNumber.from(200); + const value = 200n; const ethBalanceChange = await shouldChangeETHBalances([ { wallet: alice, change: -value }, @@ -141,7 +135,7 @@ describe('ETH token checks', () => { // Remove chainId and sign the transaction without it.
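        // (Pre-EIP-155 signatures do not commit to a chain id at all, so such transactions carry
        // no cross-chain replay protection; the point of this test is only that the node still
        // accepts them.)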
transaction.chainId = undefined; const signedTransaction = await alice.signTransaction(transaction); - await expect(alice.provider.sendTransaction(signedTransaction)).toBeAccepted([ + await expect(alice.provider.broadcastTransaction(signedTransaction)).toBeAccepted([ ethBalanceChange, correctReceiptType ]); @@ -149,7 +143,7 @@ describe('ETH token checks', () => { test('Can perform a transfer (legacy EIP-155)', async () => { const LEGACY_TX_TYPE = 0; - const value = BigNumber.from(200); + const value = 200n; const ethBalanceChange = await shouldChangeETHBalances([ { wallet: alice, change: -value }, @@ -167,26 +161,25 @@ describe('ETH token checks', () => { }); test('Can perform a transfer (EIP712)', async () => { - const value = BigNumber.from(200); + const value = 200n; const ethBalanceChange = await shouldChangeETHBalances([ { wallet: alice, change: -value }, { wallet: bob, change: value } ]); const correctReceiptType = checkReceipt( - (receipt) => receipt.type == EIP712_TX_TYPE, + (receipt) => receipt.type == zksync.utils.EIP712_TX_TYPE, 'Incorrect tx type in receipt' ); - await expect(alice.sendTransaction({ type: EIP712_TX_TYPE, to: bob.address, value })).toBeAccepted([ - ethBalanceChange, - correctReceiptType - ]); + await expect(alice.sendTransaction({ type: zksync.utils.EIP712_TX_TYPE, to: bob.address, value })).toBeAccepted( + [ethBalanceChange, correctReceiptType] + ); }); test('Can perform a transfer (EIP1559)', async () => { const EIP1559_TX_TYPE = 2; - const value = BigNumber.from(200); + const value = 200n; const ethBalanceChange = await shouldChangeETHBalances([ { wallet: alice, change: -value }, @@ -206,7 +199,7 @@ describe('ETH token checks', () => { test('Should reject transactions with access lists', async () => { const EIP_2930_TX_TYPE = 0x01; const EIP_1559_TX_TYPE = 0x02; - const value = BigNumber.from(200); + const value = 200n; await expect(alice.sendTransaction({ type: EIP_2930_TX_TYPE, to: bob.address, value })).toBeRejected( 'access lists are not supported' @@ -223,7 +216,7 @@ describe('ETH token checks', () => { }); test('Can perform a transfer to self', async () => { - const value = BigNumber.from(200); + const value = 200n; // Balance should not change, only fee should be taken. const ethBalanceChange = await shouldOnlyTakeFee(alice); @@ -251,14 +244,14 @@ describe('ETH token checks', () => { if (testMaster.isFastMode()) { return; } - const amount = 1; + const amount = 1n; const l2ethBalanceChange = isETHBasedChain ? await shouldChangeETHBalances([{ wallet: alice, change: -amount }]) : await shouldChangeTokenBalances(l2EthTokenAddressNonBase, [{ wallet: alice, change: -amount }]); const withdrawalPromise = alice.withdraw({ - token: isETHBasedChain ? ETH_ADDRESS : l2EthTokenAddressNonBase, + token: isETHBasedChain ? 
zksync.utils.ETH_ADDRESS : l2EthTokenAddressNonBase, amount }); await expect(withdrawalPromise).toBeAccepted([l2ethBalanceChange]); @@ -269,7 +262,7 @@ describe('ETH token checks', () => { await expect(alice.finalizeWithdrawal(withdrawalTx.hash)).toBeAccepted(); const tx = await alice.provider.getTransactionReceipt(withdrawalTx.hash); - expect(tx.l2ToL1Logs[0].txIndexInL1Batch).toEqual(expect.anything()); + expect(tx!.l2ToL1Logs[0].transactionIndex).toEqual(expect.anything()); }); test('Can perform a deposit with precalculated max value', async () => { @@ -278,40 +271,35 @@ describe('ETH token checks', () => { const baseTokenMaxAmount = await alice.getBalanceL1(baseTokenDetails.l1Address); await (await alice.approveERC20(baseTokenAddress, baseTokenMaxAmount)).wait(); } - const depositFee = await alice.getFullRequiredDepositFee({ - token: ETH_ADDRESS + token: zksync.utils.ETH_ADDRESS }); - const l1Fee = depositFee.l1GasLimit.mul(depositFee.maxFeePerGas! || depositFee.gasPrice!); + const l1Fee = depositFee.l1GasLimit * (depositFee.maxFeePerGas! || depositFee.gasPrice!); const l2Fee = depositFee.baseCost; const maxAmount = isETHBasedChain - ? (await alice.getBalanceL1()).sub(l1Fee).sub(l2Fee) - : (await alice.getBalanceL1()).sub(l1Fee); // l2Fee is paid in base token - + ? (await alice.getBalanceL1()) - l1Fee - l2Fee + : (await alice.getBalanceL1()) - l1Fee; // l2Fee is paid in base token // Approving the needed allowance to ensure that the user has enough funds. const l2ethBalanceChange = isETHBasedChain ? await shouldChangeETHBalances([{ wallet: alice, change: maxAmount }], { l1ToL2: true }) : await shouldChangeTokenBalances(l2EthTokenAddressNonBase, [{ wallet: alice, change: maxAmount }]); - - const overrides: Overrides = depositFee.gasPrice + const overrides: ethers.Overrides = depositFee.gasPrice ? { gasPrice: depositFee.gasPrice } : { maxFeePerGas: depositFee.maxFeePerGas, maxPriorityFeePerGas: depositFee.maxPriorityFeePerGas }; overrides.gasLimit = depositFee.l1GasLimit; - const depositOp = await alice.deposit({ - token: ETH_ADDRESS, + token: zksync.utils.ETH_ADDRESS, amount: maxAmount, l2GasLimit: depositFee.l2GasLimit, approveBaseERC20: true, approveERC20: true, overrides }); - await expect(depositOp).toBeAccepted([l2ethBalanceChange]); }); diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index 796ff6d7daff..522a9d8fd5be 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -11,16 +11,15 @@ */ import * as utils from 'utils'; import * as fs from 'fs'; -import { TestMaster } from '../src/index'; +import { TestMaster } from '../src'; import * as zksync from 'zksync-ethers'; -import { BigNumber, ethers } from 'ethers'; +import * as ethers from 'ethers'; import { DataAvailabityMode, Token } from '../src/types'; -import { keccak256 } from 'ethers/lib/utils'; import { SYSTEM_CONTEXT_ADDRESS, getTestContract } from '../src/helpers'; -const UINT32_MAX = BigNumber.from(2).pow(32).sub(1); -const MAX_GAS_PER_PUBDATA = 50_000; +const UINT32_MAX = 2n ** 32n - 1n; +const MAX_GAS_PER_PUBDATA = 50_000n; const logs = fs.createWriteStream('fees.log', { flags: 'a' }); @@ -31,21 +30,21 @@ const testFees = process.env.RUN_FEE_TEST ? describe : describe.skip; // For CI we use only 2 gas prices to not slow it down too much. const L1_GAS_PRICES_TO_TEST = process.env.CI ? 
[ - 5_000_000_000, // 5 gwei - 10_000_000_000 // 10 gwei + 5_000_000_000n, // 5 gwei + 10_000_000_000n // 10 gwei ] : [ - 1_000_000_000, // 1 gwei - 5_000_000_000, // 5 gwei - 10_000_000_000, // 10 gwei - 25_000_000_000, // 25 gwei - 50_000_000_000, // 50 gwei - 100_000_000_000, // 100 gwei - 200_000_000_000, // 200 gwei - 400_000_000_000, // 400 gwei - 800_000_000_000, // 800 gwei - 1_000_000_000_000, // 1000 gwei - 2_000_000_000_000 // 2000 gwei + 1_000_000_000n, // 1 gwei + 5_000_000_000n, // 5 gwei + 10_000_000_000n, // 10 gwei + 25_000_000_000n, // 25 gwei + 50_000_000_000n, // 50 gwei + 100_000_000_000n, // 100 gwei + 200_000_000_000n, // 200 gwei + 400_000_000_000n, // 400 gwei + 800_000_000_000n, // 800 gwei + 1_000_000_000_000n, // 1000 gwei + 2_000_000_000_000n // 2000 gwei ]; testFees('Test fees', () => { @@ -70,28 +69,36 @@ testFees('Test fees', () => { const feeTestL1Receipt = await ( await alice.ethWallet().sendTransaction({ to: receiver, - value: BigNumber.from(1) + value: 1n }) ).wait(); + if (feeTestL1Receipt === null) { + throw new Error('Failed to send ETH transaction'); + } + const feeTestL1ReceiptERC20 = await ( await alice.ethWallet().sendTransaction({ - to: aliceErc20.address, - data: aliceErc20.interface.encodeFunctionData('transfer', [receiver, BigNumber.from(1)]) + to: aliceErc20.getAddress(), + data: aliceErc20.interface.encodeFunctionData('transfer', [receiver, 1n]) }) ).wait(); + if (feeTestL1ReceiptERC20 === null) { + throw new Error('Failed to send ERC20 transaction'); + } + // Warming up slots for the receiver await ( await alice.sendTransaction({ to: receiver, - value: BigNumber.from(1) + value: BigInt(1) }) ).wait(); await ( await alice.sendTransaction({ - data: aliceErc20.interface.encodeFunctionData('transfer', [receiver, BigNumber.from(1)]), + data: aliceErc20.interface.encodeFunctionData('transfer', [receiver, 1n]), to: tokenDetails.l2Address }) ).wait(); @@ -110,21 +117,21 @@ testFees('Test fees', () => { [ { to: ethers.Wallet.createRandom().address, - value: BigNumber.from(1) + value: 1n }, { to: receiver, - value: BigNumber.from(1) + value: 1n }, { data: aliceErc20.interface.encodeFunctionData('transfer', [ ethers.Wallet.createRandom().address, - BigNumber.from(1) + 1n ]), to: tokenDetails.l2Address }, { - data: aliceErc20.interface.encodeFunctionData('transfer', [receiver, BigNumber.from(1)]), + data: aliceErc20.interface.encodeFunctionData('transfer', [receiver, 1n]), to: tokenDetails.l2Address } ], @@ -147,14 +154,14 @@ testFees('Test fees', () => { // In this test we will set gas per pubdata byte to its maximum value, while publishing a large L1->L2 message. - const minimalL2GasPrice = BigNumber.from(testMaster.environment().minimalL2GasPrice); + const minimalL2GasPrice = testMaster.environment().minimalL2GasPrice; // We want the total gas limit to be over u32::MAX, so we need the gas per pubdata to be 50k. // // Note that, if any sort of overhead is present in the L2 fair gas price calculation, the final // gas per pubdata may be lower than 50_000. Here we assume that it is not the case, but we'll double-check // that the gasLimit is indeed over u32::MAX, which is the most important tested property.
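        // (Roughly, gas_per_pubdata ~= fair_pubdata_price / fair_l2_gas_price; pricing pubdata at
        // 100_000x the minimal gas price yields a ratio of about 100_000, which the protocol is
        // assumed to clamp to MAX_GAS_PER_PUBDATA (50_000) -- the value asserted against the
        // system context below.)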
- const requiredPubdataPrice = minimalL2GasPrice.mul(100_000); + const requiredPubdataPrice = minimalL2GasPrice * 100_000n; await setInternalL1GasPrice( alice._providerL2(), @@ -165,25 +172,25 @@ testFees('Test fees', () => { const l1Messenger = new ethers.Contract(zksync.utils.L1_MESSENGER_ADDRESS, zksync.utils.L1_MESSENGER, alice); // Firstly, let's test a successful transaction. - const largeData = ethers.utils.randomBytes(90_000); + const largeData = ethers.randomBytes(90_000); const tx = await l1Messenger.sendToL1(largeData, { type: 0 }); - expect(tx.gasLimit.gt(UINT32_MAX)).toBeTruthy(); + expect(tx.gasLimit > UINT32_MAX).toBeTruthy(); const receipt = await tx.wait(); - expect(receipt.gasUsed.gt(UINT32_MAX)).toBeTruthy(); + expect(receipt.gasUsed > UINT32_MAX).toBeTruthy(); // Let's also check that the same transaction would work as eth_call const systemContextArtifact = getTestContract('ISystemContext'); const systemContext = new ethers.Contract(SYSTEM_CONTEXT_ADDRESS, systemContextArtifact.abi, alice.provider); const systemContextGasPerPubdataByte = await systemContext.gasPerPubdataByte(); - expect(systemContextGasPerPubdataByte.toNumber()).toEqual(MAX_GAS_PER_PUBDATA); + expect(systemContextGasPerPubdataByte).toEqual(MAX_GAS_PER_PUBDATA); - const dataHash = await l1Messenger.callStatic.sendToL1(largeData, { type: 0 }); - expect(dataHash).toEqual(keccak256(largeData)); + const dataHash = await l1Messenger.sendToL1.staticCall(largeData, { type: 0 }); + expect(dataHash).toEqual(ethers.keccak256(largeData)); // Secondly, let's test an unsuccessful transaction with large refund. // The size of the data has increased, so the previous gas limit is not enough. - const largerData = ethers.utils.randomBytes(91_000); + const largerData = ethers.randomBytes(91_000); const gasToPass = receipt.gasUsed; const unsuccessfulTx = await l1Messenger.sendToL1(largerData, { gasLimit: gasToPass, @@ -195,7 +202,7 @@ testFees('Test fees', () => { throw new Error('The transaction should have reverted'); } catch { const receipt = await alice.provider.getTransactionReceipt(unsuccessfulTx.hash); - expect(gasToPass.sub(receipt.gasUsed).gt(UINT32_MAX)).toBeTruthy(); + expect(gasToPass - receipt!.gasUsed > UINT32_MAX).toBeTruthy(); } }); @@ -209,9 +216,9 @@ testFees('Test fees', () => { async function appendResults( sender: zksync.Wallet, - originalL1Receipts: ethers.providers.TransactionReceipt[], - transactionRequests: ethers.providers.TransactionRequest[], - newL1GasPrice: number, + originalL1Receipts: ethers.TransactionReceipt[], + transactionRequests: ethers.TransactionRequest[], + newL1GasPrice: bigint, reports: string[] ): Promise { // For the sake of simplicity, we'll use the same pubdata price as the L1 gas price. 
@@ -236,28 +243,28 @@ async function appendResults( async function updateReport( sender: zksync.Wallet, - l1Receipt: ethers.providers.TransactionReceipt, - transactionRequest: ethers.providers.TransactionRequest, - newL1GasPrice: number, + l1Receipt: ethers.TransactionReceipt, + transactionRequest: ethers.TransactionRequest, + newL1GasPrice: bigint, oldReport: string ): Promise { - const expectedL1Price = +ethers.utils.formatEther(l1Receipt.gasUsed.mul(newL1GasPrice)); + const expectedL1Price = +ethers.formatEther(l1Receipt.gasUsed * newL1GasPrice); - const estimatedL2GasPrice = await sender.getGasPrice(); + const estimatedL2GasPrice = await sender.provider.getGasPrice(); const estimatedL2GasLimit = await sender.estimateGas(transactionRequest); - const estimatedPrice = estimatedL2GasPrice.mul(estimatedL2GasLimit); + const estimatedPrice = estimatedL2GasPrice * estimatedL2GasLimit; const balanceBefore = await sender.getBalance(); const transaction = await sender.sendTransaction(transactionRequest); console.log(`Sending transaction: ${transaction.hash}`); await transaction.wait(); const balanceAfter = await sender.getBalance(); - const balanceDiff = balanceBefore.sub(balanceAfter); + const balanceDiff = balanceBefore - balanceAfter; - const l2PriceAsNumber = +ethers.utils.formatEther(balanceDiff); - const l2EstimatedPriceAsNumber = +ethers.utils.formatEther(estimatedPrice); + const l2PriceAsNumber = +ethers.formatEther(balanceDiff); + const l2EstimatedPriceAsNumber = +ethers.formatEther(estimatedPrice); - const gasReport = `Gas price ${newL1GasPrice / 1000000000} gwei: + const gasReport = `Gas price ${newL1GasPrice / 1000000000n} gwei: L1 cost ${expectedL1Price}, L2 estimated cost: ${l2EstimatedPriceAsNumber} Estimated Gain: ${expectedL1Price / l2EstimatedPriceAsNumber} @@ -298,7 +305,7 @@ async function setInternalL1GasPrice( } catch (_) {} // Run server in background. - let command = 'zk server --components api,tree,eth,state_keeper'; + let command = 'zk server --components api,tree,eth,state_keeper,da_dispatcher'; command = `DATABASE_MERKLE_TREE_MODE=full ${command}`; if (newPubdataPrice) { @@ -316,7 +323,7 @@ async function setInternalL1GasPrice( command = `CHAIN_STATE_KEEPER_TRANSACTION_SLOTS=1 ${command}`; } - const zkSyncServer = utils.background(command, [null, logs, logs]); + const zkSyncServer = utils.background({ command, stdio: [null, logs, logs] }); if (disconnect) { zkSyncServer.unref(); diff --git a/core/tests/ts-integration/tests/l1.test.ts b/core/tests/ts-integration/tests/l1.test.ts index e149a8f7e59f..0f8466ec463b 100644 --- a/core/tests/ts-integration/tests/l1.test.ts +++ b/core/tests/ts-integration/tests/l1.test.ts @@ -1,14 +1,14 @@ /** * This suite contains tests checking the interaction with L1. * - * !WARN! Tests that interact with L1 may be very time consuming on stage. + * !WARN! Tests that interact with L1 may be very time-consuming on stage. * Please only do the minimal amount of actions to test the behavior (e.g. no unnecessary deposits/withdrawals * and waiting for the block finalization). 
*/ -import { TestMaster } from '../src/index'; +import { TestMaster } from '../src'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; -import { deployContract, getTestContract, scaledGasPrice, waitForNewL1Batch } from '../src/helpers'; +import { bigIntMax, deployContract, getTestContract, scaledGasPrice, waitForNewL1Batch } from '../src/helpers'; import { getHashedL2ToL1Msg, L1_MESSENGER, @@ -35,7 +35,7 @@ describe('Tests for L1 behavior', () => { let errorContract: zksync.Contract; let isETHBasedChain: boolean; - let expectedL2Costs: ethers.BigNumberish; + let expectedL2Costs: bigint; beforeAll(() => { testMaster = TestMaster.getInstance(__filename); @@ -63,27 +63,26 @@ describe('Tests for L1 behavior', () => { test('Should calculate l2 base cost, if base token is not ETH', async () => { const gasPrice = await scaledGasPrice(alice); if (!isETHBasedChain) { - expectedL2Costs = ( - await alice.getBaseCost({ + expectedL2Costs = + ((await alice.getBaseCost({ gasLimit: maxL2GasLimitForPriorityTxs(testMaster.environment().priorityTxMaxGasLimit), gasPerPubdataByte: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT, gasPrice - }) - ) - .mul(140) - .div(100); + })) * + 140n) / + 100n; } }); test('Should request L1 execute', async () => { const calldata = counterContract.interface.encodeFunctionData('increment', ['1']); - const gasPrice = scaledGasPrice(alice); + const gasPrice = await scaledGasPrice(alice); await expect( alice.requestExecute({ - contractAddress: counterContract.address, + contractAddress: await counterContract.getAddress(), calldata, - mintValue: isETHBasedChain ? ethers.BigNumber.from(0) : expectedL2Costs, + mintValue: isETHBasedChain ? 0n : expectedL2Costs, overrides: { gasPrice } @@ -94,14 +93,14 @@ describe('Tests for L1 behavior', () => { test('Should request L1 execute with msg.value', async () => { const l2Value = 10; const calldata = contextContract.interface.encodeFunctionData('requireMsgValue', [l2Value]); - const gasPrice = scaledGasPrice(alice); + const gasPrice = await scaledGasPrice(alice); await expect( alice.requestExecute({ - contractAddress: contextContract.address, + contractAddress: await contextContract.getAddress(), calldata, l2Value, - mintValue: isETHBasedChain ? ethers.BigNumber.from(0) : expectedL2Costs, + mintValue: isETHBasedChain ? 0n : expectedL2Costs, overrides: { gasPrice } @@ -111,14 +110,14 @@ describe('Tests for L1 behavior', () => { test('Should fail requested L1 execute', async () => { const calldata = errorContract.interface.encodeFunctionData('require_short', []); - const gasPrice = scaledGasPrice(alice); + const gasPrice = await scaledGasPrice(alice); await expect( alice.requestExecute({ - contractAddress: errorContract.address, + contractAddress: await errorContract.getAddress(), calldata, l2GasLimit: DEFAULT_L2_GAS_LIMIT, - mintValue: isETHBasedChain ? ethers.BigNumber.from(0) : expectedL2Costs, + mintValue: isETHBasedChain ? 0n : expectedL2Costs, overrides: { gasPrice } @@ -133,9 +132,9 @@ describe('Tests for L1 behavior', () => { const contract = new zksync.Contract(L1_MESSENGER_ADDRESS, L1_MESSENGER, alice); // Send message to L1 and wait until it gets there. 
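        // (Rough flow: `sendToL1` emits an L2->L1 log; once the batch reaches L1, `getMessageProof`
        // returns a Merkle path for the message, which `proveL2MessageInclusion` on the main
        // contract checks against the batch's message root -- the same computation replayed
        // locally by `calculateAccumulatedRoot` below.)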
- const message = ethers.utils.toUtf8Bytes('Some L2->L1 message'); + const message = ethers.toUtf8Bytes('Some L2->L1 message'); const tx = await contract.sendToL1(message); - const receipt = await tx.waitFinalize(); + const receipt = await (await alice.provider.getTransaction(tx.hash)).waitFinalize(); // Get the proof for the sent message from the server, expect it to exist. const l2ToL1LogIndex = receipt.l2ToL1Logs.findIndex( @@ -146,16 +145,16 @@ describe('Tests for L1 behavior', () => { // Ensure that received proof matches the provided root hash. const { id, proof, root } = msgProof!; - const accumutatedRoot = calculateAccumulatedRoot(alice.address, message, receipt.l1BatchTxIndex, id, proof); - expect(accumutatedRoot).toBe(root); + const accumulatedRoot = calculateAccumulatedRoot(alice.address, message, receipt.l1BatchTxIndex!, id, proof); + expect(accumulatedRoot).toBe(root); // Ensure that provided proof is accepted by the main ZKsync contract. const chainContract = await alice.getMainContract(); const acceptedByContract = await chainContract.proveL2MessageInclusion( - receipt.l1BatchNumber, + receipt.l1BatchNumber!, id, { - txNumberInBatch: receipt.l1BatchTxIndex, + txNumberInBatch: receipt.l1BatchTxIndex!, sender: alice.address, data: message }, @@ -165,15 +164,15 @@ describe('Tests for L1 behavior', () => { }); test('Should check max L2 gas limit for priority txs', async () => { - const gasPrice = scaledGasPrice(alice); + const gasPrice = await scaledGasPrice(alice); const l2GasLimit = maxL2GasLimitForPriorityTxs(testMaster.environment().priorityTxMaxGasLimit); // Check that the request with higher `gasLimit` fails. let priorityOpHandle = await alice.requestExecute({ contractAddress: alice.address, calldata: '0x', - l2GasLimit: l2GasLimit + 1, - mintValue: isETHBasedChain ? ethers.BigNumber.from(0) : expectedL2Costs, + l2GasLimit: l2GasLimit + 1n, + mintValue: isETHBasedChain ? 0n : expectedL2Costs, overrides: { gasPrice, gasLimit: 600_000 @@ -192,7 +191,7 @@ describe('Tests for L1 behavior', () => { contractAddress: alice.address, calldata: '0x', l2GasLimit, - mintValue: isETHBasedChain ? ethers.BigNumber.from(0) : expectedL2Costs, + mintValue: isETHBasedChain ? 0n : expectedL2Costs, overrides: { gasPrice } @@ -208,19 +207,19 @@ describe('Tests for L1 behavior', () => { } const contract = await deployContract(alice, contracts.writesAndMessages, []); - testMaster.reporter.debug(`Deployed 'writesAndMessages' contract at ${contract.address}`); + testMaster.reporter.debug(`Deployed 'writesAndMessages' contract at ${await contract.getAddress()}`); // The circuit allows us to have ~4700 initial writes for an L1 batch. // We check that we will run out of gas if we do a bit smaller amount of writes. const calldata = contract.interface.encodeFunctionData('writes', [0, 4500, 1]); - const gasPrice = scaledGasPrice(alice); + const gasPrice = await scaledGasPrice(alice); const l2GasLimit = maxL2GasLimitForPriorityTxs(testMaster.environment().priorityTxMaxGasLimit); const priorityOpHandle = await alice.requestExecute({ - contractAddress: contract.address, + contractAddress: await contract.getAddress(), calldata, l2GasLimit, - mintValue: isETHBasedChain ? ethers.BigNumber.from(0) : expectedL2Costs, + mintValue: isETHBasedChain ? 
0n : expectedL2Costs, overrides: { gasPrice } @@ -243,13 +242,13 @@ } const contract = await deployContract(alice, contracts.writesAndMessages, []); - testMaster.reporter.debug(`Deployed 'writesAndMessages' contract at ${contract.address}`); + testMaster.reporter.debug(`Deployed 'writesAndMessages' contract at ${await contract.getAddress()}`); // The circuit allows us to have ~7500 repeated writes for an L1 batch. // We check that we will run out of gas if we do a bit smaller amount of writes. // In order for writes to be repeated, we should write to the keys initially. const initialWritesInOneTx = 500; const repeatedWritesInOneTx = 8500; - const gasLimit = await contract.estimateGas.writes(0, initialWritesInOneTx, 1); + const gasLimit = await contract.writes.estimateGas(0, initialWritesInOneTx, 1); let proms = []; const nonce = await alice.getNonce(); @@ -268,14 +267,14 @@ describe('Tests for L1 behavior', () => { testMaster.reporter.debug('L1 batch sealed with write transactions'); const calldata = contract.interface.encodeFunctionData('writes', [0, repeatedWritesInOneTx, 2]); - const gasPrice = scaledGasPrice(alice); + const gasPrice = await scaledGasPrice(alice); const l2GasLimit = maxL2GasLimitForPriorityTxs(testMaster.environment().priorityTxMaxGasLimit); const priorityOpHandle = await alice.requestExecute({ - contractAddress: contract.address, + contractAddress: await contract.getAddress(), calldata, l2GasLimit, - mintValue: isETHBasedChain ? ethers.BigNumber.from(0) : expectedL2Costs, + mintValue: isETHBasedChain ? 0n : expectedL2Costs, overrides: { gasPrice } @@ -298,19 +297,19 @@ describe('Tests for L1 behavior', () => { } const contract = await deployContract(alice, contracts.writesAndMessages, []); - testMaster.reporter.debug(`Deployed 'writesAndMessages' contract at ${contract.address}`); + testMaster.reporter.debug(`Deployed 'writesAndMessages' contract at ${await contract.getAddress()}`); // The circuit allows us to have 512 L2->L1 logs for an L1 batch. // We check that we will run out of gas if we send a bit smaller amount of L2->L1 logs. const calldata = contract.interface.encodeFunctionData('l2_l1_messages', [1000]); - const gasPrice = scaledGasPrice(alice); + const gasPrice = await scaledGasPrice(alice); const l2GasLimit = maxL2GasLimitForPriorityTxs(testMaster.environment().priorityTxMaxGasLimit); const priorityOpHandle = await alice.requestExecute({ - contractAddress: contract.address, + contractAddress: await contract.getAddress(), calldata, l2GasLimit, - mintValue: isETHBasedChain ? ethers.BigNumber.from(0) : expectedL2Costs, + mintValue: isETHBasedChain ? 0n : expectedL2Costs, overrides: { gasPrice } @@ -336,21 +335,21 @@ describe('Tests for L1 behavior', () => { testMaster.reporter.debug(`Deployed 'writesAndMessages' contract at ${contract.address}`); const SYSTEM_CONFIG = require(`${testMaster.environment().pathToHome}/contracts/SystemConfig.json`); - const MAX_PUBDATA_PER_BATCH = ethers.BigNumber.from(SYSTEM_CONFIG['PRIORITY_TX_PUBDATA_PER_BATCH']); + const MAX_PUBDATA_PER_BATCH = BigInt(SYSTEM_CONFIG['PRIORITY_TX_PUBDATA_PER_BATCH']); // We check that we will run out of gas if we send a bit // smaller than `MAX_PUBDATA_PER_BATCH` amount of pubdata in a single tx.
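        // (Using ~90% of the per-batch pubdata budget in a single message: presumably small enough
        // to pass API-side checks, yet large enough with overhead to exhaust the priority
        // transaction's gas, which is what the test asserts.)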
const calldata = contract.interface.encodeFunctionData('big_l2_l1_message', [ - MAX_PUBDATA_PER_BATCH.mul(9).div(10) + (MAX_PUBDATA_PER_BATCH * 9n) / 10n ]); - const gasPrice = scaledGasPrice(alice); + const gasPrice = await scaledGasPrice(alice); const l2GasLimit = maxL2GasLimitForPriorityTxs(testMaster.environment().priorityTxMaxGasLimit); const priorityOpHandle = await alice.requestExecute({ - contractAddress: contract.address, + contractAddress: await contract.getAddress(), calldata, l2GasLimit, - mintValue: isETHBasedChain ? ethers.BigNumber.from(0) : expectedL2Costs, + mintValue: isETHBasedChain ? 0n : expectedL2Costs, overrides: { gasPrice } @@ -386,29 +385,29 @@ function calculateAccumulatedRoot( for (const elem of proof) { const bytes = (idCopy & 1) == 0 - ? new Uint8Array([...ethers.utils.arrayify(accumutatedRoot), ...ethers.utils.arrayify(elem)]) - : new Uint8Array([...ethers.utils.arrayify(elem), ...ethers.utils.arrayify(accumutatedRoot)]); + ? new Uint8Array([...ethers.getBytes(accumutatedRoot), ...ethers.getBytes(elem)]) + : new Uint8Array([...ethers.getBytes(elem), ...ethers.getBytes(accumutatedRoot)]); - accumutatedRoot = ethers.utils.keccak256(bytes); + accumutatedRoot = ethers.keccak256(bytes); idCopy /= 2; } return accumutatedRoot; } -function maxL2GasLimitForPriorityTxs(maxGasBodyLimit: number): number { +function maxL2GasLimitForPriorityTxs(maxGasBodyLimit: bigint): bigint { // Find the maximum `gasLimit` that satisfies `txBodyGasLimit <= CONTRACTS_PRIORITY_TX_MAX_GAS_LIMIT`: // it is the body limit plus the per-transaction overhead. const overhead = getOverheadForTransaction( // We can just pass 0 as `encodingLength` because the overhead for the transaction's slot // will be greater than `overheadForLength` for a typical transaction - ethers.BigNumber.from(0) + 0n ); return maxGasBodyLimit + overhead; } -function getOverheadForTransaction(encodingLength: ethers.BigNumber): number { - const TX_SLOT_OVERHEAD_GAS = 10_000; - const TX_LENGTH_BYTE_OVERHEAD_GAS = 10; +function getOverheadForTransaction(encodingLength: bigint): bigint { + const TX_SLOT_OVERHEAD_GAS = 10_000n; + const TX_LENGTH_BYTE_OVERHEAD_GAS = 10n; - return Math.max(TX_SLOT_OVERHEAD_GAS, TX_LENGTH_BYTE_OVERHEAD_GAS * encodingLength.toNumber()); + return bigIntMax(TX_SLOT_OVERHEAD_GAS, TX_LENGTH_BYTE_OVERHEAD_GAS * encodingLength); } diff --git a/core/tests/ts-integration/tests/mempool.test.ts b/core/tests/ts-integration/tests/mempool.test.ts index 6dacc54ac1fb..367e6569e88c 100644 --- a/core/tests/ts-integration/tests/mempool.test.ts +++ b/core/tests/ts-integration/tests/mempool.test.ts @@ -2,7 +2,7 @@ * This suite contains tests checking the mempool behavior: how transactions are inserted, * scheduled, processed and/or postponed. */ -import { TestMaster } from '../src/index'; +import { TestMaster } from '../src'; import * as zksync from 'zksync-ethers'; describe('Tests for the mempool behavior', () => { @@ -17,7 +17,7 @@ describe('Tests for the mempool behavior', () => { test('Should allow a nonce gap', async () => { // Here we check a basic case: first we send a transaction with nonce +1, then with valid nonce. // Both transactions should be processed.
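        // (The mempool holds transactions whose nonces are ahead of the account's current nonce
        // instead of rejecting them, and releases them once the gap is filled -- the behavior
        // this suite relies on throughout.)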
- const startNonce = await alice.getTransactionCount(); + const startNonce = await alice.getNonce(); const tx2 = await sendTxWithNonce(alice, startNonce + 1); const tx1 = await sendTxWithNonce(alice, startNonce); @@ -29,7 +29,7 @@ test('Should process shuffled nonces', async () => { // More complex nonce mixup: we send 5 txs completely out of order. - const startNonce = await alice.getTransactionCount(); + const startNonce = await alice.getNonce(); const nonceOffsets = [4, 0, 3, 1, 2]; const txs = nonceOffsets.map((offset) => sendTxWithNonce(alice, startNonce + offset).then((tx) => tx.wait())); @@ -41,23 +41,23 @@ }, 600000); test('Should discard too low nonce', async () => { - const startNonce = await alice.getTransactionCount(); + const startNonce = await alice.getNonce(); await expect(sendTxWithNonce(alice, startNonce - 1)).toBeRejected('nonce too low.'); }); test('Should discard too big nonce', async () => { const maxNonceAhead = 450; // Matches the server config. - const startNonce = await alice.getTransactionCount(); + const startNonce = await alice.getNonce(); await expect(sendTxWithNonce(alice, startNonce + maxNonceAhead + 1)).toBeRejected('nonce too high.'); }); test('Should correctly show pending nonce', async () => { - const startNonce = await alice.getTransactionCount(); + const startNonce = await alice.getNonce(); // Send tx with nonce + 1 const tx2 = await sendTxWithNonce(alice, startNonce + 1); // Nonce from API should not change (e.g. not become "nonce + 2"). - const nonce = await alice.getTransactionCount(); + const nonce = await alice.getNonce(); expect(nonce).toEqual(startNonce); // Finish both transactions to not ruin the flow for other tests. @@ -66,7 +66,7 @@ }); test('Should replace the transaction', async () => { - const startNonce = await alice.getTransactionCount(); + const startNonce = await alice.getNonce(); // Send tx with nonce + 1 const tx2 = await sendTxWithNonce(alice, startNonce + 1); await expect(alice.provider.getTransaction(tx2.hash)).resolves.toMatchObject({ @@ -102,7 +102,7 @@ const gasLimit = await alice.estimateGas({ to: alice.address }); const gasPrice = await alice.provider.getGasPrice(); - const fund = gasLimit.mul(gasPrice).mul(13).div(10); + const fund = (gasLimit * gasPrice * 13n) / 10n; await alice.sendTransaction({ to: poorBob.address, value: fund }).then((tx) => tx.wait()); // delayedTx should pass API checks (if not then error will be thrown on the next line) @@ -146,7 +146,7 @@ * * @returns Transaction response object. */ -function sendTxWithNonce(wallet: zksync.Wallet, nonce: number, to?: string) { +function sendTxWithNonce(wallet: zksync.Wallet, nonce: number, to?: string): Promise<zksync.types.TransactionResponse> { return wallet.sendTransaction({ to: to ?? wallet.address, value: 1, diff --git a/core/tests/ts-integration/tests/paymaster.test.ts b/core/tests/ts-integration/tests/paymaster.test.ts index 537035777553..8c9024dc4372 100644 --- a/core/tests/ts-integration/tests/paymaster.test.ts +++ b/core/tests/ts-integration/tests/paymaster.test.ts @@ -1,7 +1,7 @@ /** * This suite contains tests checking the behavior of paymasters -- entities that can cover fees for users.
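 * Roughly, a paymaster implements `validateAndPayForPaymasterTransaction` (and optionally `postTransaction`) and charges the user in a token of its choice.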
*/ -import { TestMaster } from '../src/index'; +import { TestMaster } from '../src'; import * as zksync from 'zksync-ethers'; import { Provider, Wallet, utils, Contract } from 'zksync-ethers'; import * as ethers from 'ethers'; @@ -20,12 +20,12 @@ const contracts = { }; // The amount of tokens to transfer (in wei). -const AMOUNT = 1; +const AMOUNT = 1n; // Exchange ratios for each 1 ETH wei -const CUSTOM_PAYMASTER_RATE_NUMERATOR = ethers.BigNumber.from(5); -const TESTNET_PAYMASTER_RATE_NUMERATOR = ethers.BigNumber.from(1); -const PAYMASTER_RATE_DENOMINATOR = ethers.BigNumber.from(1); +const CUSTOM_PAYMASTER_RATE_NUMERATOR = 5n; +const TESTNET_PAYMASTER_RATE_NUMERATOR = 1n; +const PAYMASTER_RATE_DENOMINATOR = 1n; describe('Paymaster tests', () => { let testMaster: TestMaster; @@ -50,31 +50,30 @@ describe('Paymaster tests', () => { paymaster = await deployContract(alice, contracts.customPaymaster, []); // Supplying paymaster with ETH it would need to cover the fees for the user await alice - .transfer({ to: paymaster.address, amount: L2_DEFAULT_ETH_PER_ACCOUNT.div(4) }) + .transfer({ to: await paymaster.getAddress(), amount: L2_DEFAULT_ETH_PER_ACCOUNT / 4n }) .then((tx) => tx.wait()); }); test('Should pay fee with paymaster', async () => { paymaster = await deployContract(alice, contracts.customPaymaster, []); + const paymasterAddress = await paymaster.getAddress(); // Supplying paymaster with ETH it would need to cover the fees for the user - await alice - .transfer({ to: paymaster.address, amount: L2_DEFAULT_ETH_PER_ACCOUNT.div(4) }) - .then((tx) => tx.wait()); + await alice.transfer({ to: paymasterAddress, amount: L2_DEFAULT_ETH_PER_ACCOUNT / 4n }).then((tx) => tx.wait()); const correctSignature = new Uint8Array(46); const paymasterParamsForEstimation = await getTestPaymasterParamsForFeeEstimation( erc20, alice.address, - paymaster.address + paymasterAddress ); - const tx = await erc20.populateTransaction.transfer(alice.address, AMOUNT, { + const tx = await erc20.transfer.populateTransaction(alice.address, AMOUNT, { customData: { gasPerPubdata: utils.DEFAULT_GAS_PER_PUBDATA_LIMIT, paymasterParams: paymasterParamsForEstimation } }); - tx.gasLimit = await erc20.estimateGas.transfer(alice.address, AMOUNT, { + tx.gasLimit = await erc20.transfer.estimateGas(alice.address, AMOUNT, { customData: { gasPerPubdata: utils.DEFAULT_GAS_PER_PUBDATA_LIMIT, paymasterParams: paymasterParamsForEstimation @@ -82,17 +81,17 @@ describe('Paymaster tests', () => { }); const txPromise = sendTxWithTestPaymasterParams( - tx, + tx as zksync.types.Transaction, alice.provider, alice, - paymaster.address, + paymasterAddress, erc20Address, correctSignature, testMaster.environment().l2ChainId ); await expect(txPromise).toBeAccepted([ checkReceipt( - (receipt) => paidFeeWithPaymaster(receipt, CUSTOM_PAYMASTER_RATE_NUMERATOR, paymaster.address), + (receipt) => paidFeeWithPaymaster(receipt, CUSTOM_PAYMASTER_RATE_NUMERATOR, paymasterAddress), 'Fee was not paid (or paid incorrectly)' ) ]); @@ -100,19 +99,20 @@ describe('Paymaster tests', () => { test('Should call postOp of the paymaster', async () => { const correctSignature = new Uint8Array(46); + const paymasterAddress = await paymaster.getAddress(); const paymasterParamsForEstimation = await getTestPaymasterParamsForFeeEstimation( erc20, alice.address, - paymaster.address + paymasterAddress ); - const tx = await erc20.populateTransaction.transfer(alice.address, AMOUNT, { + const tx = await erc20.transfer.populateTransaction(alice.address, AMOUNT, { customData: { 
gasPerPubdata: utils.DEFAULT_GAS_PER_PUBDATA_LIMIT, paymasterParams: paymasterParamsForEstimation } }); - tx.gasLimit = await erc20.estimateGas.transfer(alice.address, AMOUNT, { + tx.gasLimit = await erc20.transfer.estimateGas(alice.address, AMOUNT, { customData: { gasPerPubdata: utils.DEFAULT_GAS_PER_PUBDATA_LIMIT, paymasterParams: paymasterParamsForEstimation @@ -121,21 +121,21 @@ describe('Paymaster tests', () => { // We add 300k gas to make sure that the postOp is successfully called // Note, that the successful call of the postOp is not guaranteed by the protocol & // should not be required from the users. We still do it here for the purpose of the test. - tx.gasLimit = tx.gasLimit!.add(300000); + tx.gasLimit = tx.gasLimit! + 300000n; testMaster.environment().l2ChainId; const txPromise = sendTxWithTestPaymasterParams( - tx, + tx as zksync.types.Transaction, alice.provider, alice, - paymaster.address, + paymasterAddress, erc20Address, correctSignature, testMaster.environment().l2ChainId ); await expect(txPromise).toBeAccepted([ checkReceipt( - (receipt) => paidFeeWithPaymaster(receipt, CUSTOM_PAYMASTER_RATE_NUMERATOR, paymaster.address), + (receipt) => paidFeeWithPaymaster(receipt, CUSTOM_PAYMASTER_RATE_NUMERATOR, paymasterAddress), 'Fee was not paid (or paid incorrectly)' ) ]); @@ -155,11 +155,9 @@ describe('Paymaster tests', () => { expect(testnetPaymaster).toBeTruthy(); // Supplying paymaster with ETH it would need to cover the fees for the user - await alice - .transfer({ to: testnetPaymaster, amount: L2_DEFAULT_ETH_PER_ACCOUNT.div(4) }) - .then((tx) => tx.wait()); + await alice.transfer({ to: testnetPaymaster, amount: L2_DEFAULT_ETH_PER_ACCOUNT / 4n }).then((tx) => tx.wait()); - const tx = await erc20.populateTransaction.transfer(alice.address, AMOUNT); + const tx = await erc20.transfer.populateTransaction(alice.address, AMOUNT); const gasPrice = await alice.provider.getGasPrice(); const aliceERC20Balance = await erc20.balanceOf(alice.address); @@ -168,7 +166,7 @@ describe('Paymaster tests', () => { // For transaction estimation we provide the paymasterInput with large // minimalAllowance. It is safe for the end users, since the transaction is never // actually signed. - minimalAllowance: aliceERC20Balance.sub(AMOUNT), + minimalAllowance: aliceERC20Balance - AMOUNT, token: erc20Address, // While the "correct" paymaster signature may not be available in the true mainnet // paymasters, it is accessible in this test to make the test paymaster simpler. @@ -176,13 +174,13 @@ describe('Paymaster tests', () => { // to cover the fee for him. 
innerInput: new Uint8Array() }); - const gasLimit = await erc20.estimateGas.transfer(alice.address, AMOUNT, { + const gasLimit = await erc20.transfer.estimateGas(alice.address, AMOUNT, { customData: { gasPerPubdata: utils.DEFAULT_GAS_PER_PUBDATA_LIMIT, paymasterParams: paramsForFeeEstimation } }); - const fee = gasPrice.mul(gasLimit); + const fee = gasPrice * gasLimit; const paymasterParams = utils.getPaymasterParams(testnetPaymaster, { type: 'ApprovalBased', @@ -208,18 +206,19 @@ describe('Paymaster tests', () => { }); test('Should reject tx with invalid paymaster input', async () => { + const paymasterAddress = await paymaster.getAddress(); const paymasterParamsForEstimation = await getTestPaymasterParamsForFeeEstimation( erc20, alice.address, - paymaster.address + paymasterAddress ); - const tx = await erc20.populateTransaction.transfer(alice.address, AMOUNT, { + const tx = await erc20.transfer.populateTransaction(alice.address, AMOUNT, { customData: { gasPerPubdata: utils.DEFAULT_GAS_PER_PUBDATA_LIMIT, paymasterParams: paymasterParamsForEstimation } }); - tx.gasLimit = await erc20.estimateGas.transfer(alice.address, AMOUNT, { + tx.gasLimit = await erc20.transfer.estimateGas(alice.address, AMOUNT, { customData: { gasPerPubdata: utils.DEFAULT_GAS_PER_PUBDATA_LIMIT, paymasterParams: paymasterParamsForEstimation @@ -229,10 +228,10 @@ describe('Paymaster tests', () => { const incorrectSignature = new Uint8Array(45); await expect( sendTxWithTestPaymasterParams( - tx, + tx as zksync.types.Transaction, alice.provider, alice, - paymaster.address, + paymasterAddress, erc20Address, incorrectSignature, testMaster.environment().l2ChainId @@ -240,22 +239,23 @@ describe('Paymaster tests', () => { ).toBeRejected('Paymaster validation error'); }); - it('Should deploy nonce-check paymaster and not fail validation', async function () { + test('Should deploy nonce-check paymaster and not fail validation', async function () { const deployer = new Deployer(hre as any, alice as any); const paymaster = await deployPaymaster(deployer); + const paymasterAddress = await paymaster.getAddress(); const token = testMaster.environment().erc20Token; await ( await deployer.zkWallet.sendTransaction({ - to: paymaster.address, - value: ethers.utils.parseEther('0.01') + to: paymasterAddress, + value: ethers.parseEther('0.01') }) ).wait(); - const paymasterParams = utils.getPaymasterParams(paymaster.address, { + const paymasterParams = utils.getPaymasterParams(paymasterAddress, { type: 'ApprovalBased', token: token.l2Address, - minimalAllowance: ethers.BigNumber.from(1), + minimalAllowance: 1n, innerInput: new Uint8Array() }); @@ -281,7 +281,16 @@ describe('Paymaster tests', () => { } }); - await expect(bobTx).toBeRejected('Nonce is zerooo'); + /* + Ethers v6 error handling is not capable of handling this format of messages. + See: https://github.com/ethers-io/ethers.js/blob/main/src.ts/providers/provider-jsonrpc.ts#L976 + { + "code": 3, + "message": "failed paymaster validation. error message: Nonce is zerooo", + "data": "0x" + } + */ + await expect(bobTx).toBeRejected(/*'Nonce is zerooo'*/); const aliceTx2 = alice.transfer({ to: alice.address, @@ -304,13 +313,13 @@ describe('Paymaster tests', () => { }); /** - * Matcher modifer that checks if the fee was paid with the paymaster. + * Matcher modifier that checks if the fee was paid with the paymaster. * It only checks the receipt logs and assumes that logs are correct (e.g. if event is present, tokens were moved). * Assumption is that other tests ensure this invariant. 
*/ function paidFeeWithPaymaster( receipt: zksync.types.TransactionReceipt, - ratioNumerator: ethers.BigNumber, + ratioNumerator: bigint, paymaster: string ): boolean { const errorMessage = (line: string) => { @@ -342,11 +351,11 @@ function paidFeeWithPaymaster( // Find the log showing that the fee in ERC20 was taken from the user. // We need to pad values to represent 256-bit value. - const fromAccountAddress = ethers.utils.hexZeroPad(ethers.utils.arrayify(receipt.from), 32); - const paddedAmount = ethers.utils.hexZeroPad(ethers.utils.arrayify(expectedErc20Fee), 32); - const paddedPaymaster = ethers.utils.hexZeroPad(ethers.utils.arrayify(paymaster), 32); + const fromAccountAddress = ethers.zeroPadValue(receipt.from, 32); + const paddedAmount = ethers.toBeHex(expectedErc20Fee, 32); + const paddedPaymaster = ethers.zeroPadValue(paymaster, 32); // ERC20 fee log is one that sends money to the paymaster. - const erc20TransferTopic = ethers.utils.id('Transfer(address,address,uint256)'); + const erc20TransferTopic = ethers.id('Transfer(address,address,uint256)'); const erc20FeeLog = receipt.logs.find((log) => { return ( log.topics.length == 3 && @@ -365,7 +374,7 @@ function paidFeeWithPaymaster( return true; } -function getTestPaymasterFeeInToken(feeInEth: ethers.BigNumber, numerator: ethers.BigNumber) { +function getTestPaymasterFeeInToken(feeInEth: bigint, numerator: bigint) { // The number of ETH that the paymaster agrees to swap is equal to // tokenAmount * exchangeRateNumerator / exchangeRateDenominator // @@ -374,11 +383,11 @@ function getTestPaymasterFeeInToken(feeInEth: ethers.BigNumber, numerator: ether // tokenAmount = ceil(feeInEth * exchangeRateDenominator / exchangeRateNumerator) // for easier ceiling we do the following: // tokenAmount = (ethNeeded * exchangeRateDenominator + exchangeRateNumerator - 1) / exchangeRateNumerator - return feeInEth.mul(PAYMASTER_RATE_DENOMINATOR).add(numerator).sub(1).div(numerator); + return (feeInEth * PAYMASTER_RATE_DENOMINATOR + numerator - 1n) / numerator; } -function getTestPaymasterInnerInput(signature: ethers.BytesLike, tokenAmount: ethers.BigNumber) { - const abiEncoder = new ethers.utils.AbiCoder(); +function getTestPaymasterInnerInput(signature: ethers.BytesLike, tokenAmount: bigint) { + const abiEncoder = new ethers.AbiCoder(); return abiEncoder.encode( ['bytes', 'uint256', 'uint256', 'uint256'], [signature, CUSTOM_PAYMASTER_RATE_NUMERATOR, PAYMASTER_RATE_DENOMINATOR, tokenAmount] @@ -401,21 +410,16 @@ async function getTestPaymasterParamsForFeeEstimation( // minimalAllowance. It is safe for the end users, since the transaction is never // actually signed. minimalAllowance: aliceERC20Balance, - token: erc20.address, + token: await erc20.getAddress(), // The amount that is passed does not matter, since the testnet paymaster does not enforce it // to cover the fee for him. 
- innerInput: getTestPaymasterInnerInput(correctSignature, ethers.BigNumber.from(1)) + innerInput: getTestPaymasterInnerInput(correctSignature, 1n) }); return paramsForFeeEstimation; } -function getTestPaymasterParams( - paymaster: string, - token: string, - ethNeeded: ethers.BigNumber, - signature: ethers.BytesLike -) { +function getTestPaymasterParams(paymaster: string, token: string, ethNeeded: bigint, signature: ethers.BytesLike) { const tokenAmount = getTestPaymasterFeeInToken(ethNeeded, CUSTOM_PAYMASTER_RATE_NUMERATOR); // The input to the tester paymaster const innerInput = getTestPaymasterInnerInput(signature, tokenAmount); @@ -429,23 +433,23 @@ function getTestPaymasterParams( } async function sendTxWithTestPaymasterParams( - tx: ethers.PopulatedTransaction, - web3Provider: Provider, + tx: zksync.types.Transaction, + browserProvider: Provider, sender: Wallet, paymasterAddress: string, token: string, paymasterSignature: ethers.BytesLike, - l2ChainId: number + l2ChainId: bigint ) { - const gasPrice = await web3Provider.getGasPrice(); + const gasPrice = await browserProvider.getGasPrice(); tx.gasPrice = gasPrice; tx.chainId = l2ChainId; - tx.value = ethers.BigNumber.from(0); - tx.nonce = await web3Provider.getTransactionCount(sender.address); + tx.value = 0n; + tx.nonce = await browserProvider.getTransactionCount(sender.address); tx.type = 113; - const ethNeeded = tx.gasLimit!.mul(gasPrice); + const ethNeeded = tx.gasLimit! * gasPrice; const paymasterParams = getTestPaymasterParams(paymasterAddress, token, ethNeeded, paymasterSignature); tx.customData = { @@ -454,7 +458,7 @@ async function sendTxWithTestPaymasterParams( paymasterParams }; const signedTx = await sender.signTransaction(tx); - return await web3Provider.sendTransaction(signedTx); + return await browserProvider.broadcastTransaction(signedTx); } async function deployPaymaster(deployer: Deployer): Promise { diff --git a/core/tests/ts-integration/tests/self-unit.test.ts b/core/tests/ts-integration/tests/self-unit.test.ts deleted file mode 100644 index 50655e7c2c73..000000000000 --- a/core/tests/ts-integration/tests/self-unit.test.ts +++ /dev/null @@ -1,39 +0,0 @@ -/** - * This file contains unit tests for the framework itself. - * It does not receive a funced account and should not interact with the ZKsync server. - */ -import { TestMaster } from '../src/index'; -import { BigNumber } from 'ethers'; - -describe('Common checks for library invariants', () => { - test('Should not have a test master', () => { - // Should not receive a test account in the unit tests file. 
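Note: the fee conversion in `getTestPaymasterFeeInToken` above relies on a standard integer-ceiling trick, worth spelling out since bigint division truncates toward zero. A worked sketch (the numbers are illustrative, not taken from the test):

```ts
// ceil(a / b) for positive bigints, written as (a + b - 1n) / b because
// bigint division truncates. E.g. with feeInEth = 10n, denominator = 1n and
// numerator = 3n: (10n * 1n + 3n - 1n) / 3n = 12n / 3n = 4n, i.e. ceil(10 / 3).
function ceilDiv(a: bigint, b: bigint): bigint {
    return (a + b - 1n) / b;
}
```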
- expect(() => TestMaster.getInstance(__filename)).toThrow('Wallet for self-unit.test.ts suite was not provided'); - }); - - test('BigNumber matchers should work', () => { - const hundred = BigNumber.from(100); - - // gt - expect(hundred).bnToBeGt(0); - expect(hundred).not.bnToBeGt(100); - - // gte - expect(hundred).bnToBeGte(0); - expect(hundred).bnToBeGte(100); - expect(hundred).not.bnToBeGte(200); - - // eq - expect(hundred).bnToBeEq(100); - expect(hundred).not.bnToBeEq(200); - - // lte - expect(hundred).not.bnToBeLte(90); - expect(hundred).bnToBeLte(100); - expect(hundred).bnToBeLte(101); - - // lt - expect(hundred).not.bnToBeLt(100); - expect(hundred).bnToBeLt(101); - }); -}); diff --git a/core/tests/ts-integration/tests/system.test.ts b/core/tests/ts-integration/tests/system.test.ts index 2934226eed8f..3c09bcb7b466 100644 --- a/core/tests/ts-integration/tests/system.test.ts +++ b/core/tests/ts-integration/tests/system.test.ts @@ -5,16 +5,15 @@ * Stuff related to the edge cases, bootloader and system contracts normally expected to go here. */ -import { TestMaster } from '../src/index'; +import { TestMaster } from '../src'; import { shouldChangeTokenBalances } from '../src/modifiers/balance-checker'; import { L2_DEFAULT_ETH_PER_ACCOUNT } from '../src/context-owner'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; -import { BigNumberish, BytesLike } from 'ethers'; -import { hashBytecode, serialize } from 'zksync-ethers/build/utils'; import { SYSTEM_CONTEXT_ADDRESS, getTestContract } from '../src/helpers'; import { DataAvailabityMode } from '../src/types'; +import { BigNumberish } from 'ethers'; const contracts = { counter: getTestContract('Counter'), @@ -60,9 +59,9 @@ describe('System behavior checks', () => { ); const sender = zksync.Wallet.createRandom().address; - const hash = ethers.utils.randomBytes(32); - const salt = ethers.utils.randomBytes(32); - const input = ethers.utils.randomBytes(128); + const hash = ethers.randomBytes(32); + const salt = ethers.randomBytes(32); + const input = ethers.randomBytes(128); const nonce = 5; const create2AddressBySDK = zksync.utils.create2Address(sender, hash, salt, input); @@ -76,7 +75,7 @@ describe('System behavior checks', () => { test('Should accept transactions with small gasPerPubdataByte', async () => { const smallGasPerPubdata = 1; - const senderNonce = await alice.getTransactionCount(); + const senderNonce = await alice.getNonce(); // A safe low value to determine whether we can run this test. // It's higher than `smallGasPerPubdata` to not make the test flaky. 
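Note: the `bnToBe*` assertions deleted above existed only because ethers v5 `BigNumber`s are objects that Jest's standard matchers cannot compare. Native `bigint` values work with the built-in comparison matchers directly; a sketch of the equivalent checks:

```ts
test('bigint values work with built-in Jest matchers', () => {
    const hundred = 100n;
    expect(hundred).toBeGreaterThan(0n); // was: bnToBeGt
    expect(hundred).toBeGreaterThanOrEqual(100n); // was: bnToBeGte
    expect(hundred).toEqual(100n); // was: bnToBeEq
    expect(hundred).toBeLessThanOrEqual(100n); // was: bnToBeLte
    expect(hundred).toBeLessThan(101n); // was: bnToBeLt
});
```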
@@ -103,7 +102,7 @@ describe('System behavior checks', () => { } }); - test('Should check that bootloader utils: Legacy tx hash', async () => { + test('Should check bootloader utils: Legacy tx hash', async () => { const bootloaderUtils = bootloaderUtilsContract(); // Testing the correctness of calculating the legacy tx hashes @@ -116,12 +115,12 @@ describe('System behavior checks', () => { gasLimit: 50000 }); const txBytes = await alice.signTransaction(legacyTx); - const parsedTx = zksync.utils.parseTransaction(txBytes); + const parsedTx = ethers.Transaction.from(txBytes); + const txData = signedTxToTransactionData(parsedTx)!; const expectedTxHash = parsedTx.hash; - delete legacyTx.from; - const expectedSignedHash = ethers.utils.keccak256(serialize(legacyTx)); + const expectedSignedHash = ethers.keccak256(parsedTx.unsignedSerialized); const proposedHashes = await bootloaderUtils.getTransactionHashes(txData); expect(proposedHashes.txHash).toEqual(expectedTxHash); @@ -142,12 +141,12 @@ describe('System behavior checks', () => { gasPrice: 55000 }); const signedEip2930Tx = await alice.signTransaction(eip2930Tx); - const parsedEIP2930tx = zksync.utils.parseTransaction(signedEip2930Tx); + const parsedEIP2930tx = ethers.Transaction.from(signedEip2930Tx); const EIP2930TxData = signedTxToTransactionData(parsedEIP2930tx)!; - delete eip2930Tx.from; + const expectedEIP2930TxHash = parsedEIP2930tx.hash; - const expectedEIP2930SignedHash = ethers.utils.keccak256(serialize(eip2930Tx)); + const expectedEIP2930SignedHash = ethers.keccak256(parsedEIP2930tx.unsignedSerialized); const proposedEIP2930Hashes = await bootloaderUtils.getTransactionHashes(EIP2930TxData); expect(proposedEIP2930Hashes.txHash).toEqual(expectedEIP2930TxHash); @@ -168,12 +167,12 @@ describe('System behavior checks', () => { maxPriorityFeePerGas: 100 }); const signedEip1559Tx = await alice.signTransaction(eip1559Tx); - const parsedEIP1559tx = zksync.utils.parseTransaction(signedEip1559Tx); + const parsedEIP1559tx = ethers.Transaction.from(signedEip1559Tx); const EIP1559TxData = signedTxToTransactionData(parsedEIP1559tx)!; - delete eip1559Tx.from; + const expectedEIP1559TxHash = parsedEIP1559tx.hash; - const expectedEIP1559SignedHash = ethers.utils.keccak256(serialize(eip1559Tx)); + const expectedEIP1559SignedHash = ethers.keccak256(parsedEIP1559tx.unsignedSerialized); const proposedEIP1559Hashes = await bootloaderUtils.getTransactionHashes(EIP1559TxData); expect(proposedEIP1559Hashes.txHash).toEqual(expectedEIP1559TxHash); @@ -195,7 +194,7 @@ describe('System behavior checks', () => { } }); const signedEip712Tx = await alice.signTransaction(eip712Tx); - const parsedEIP712tx = zksync.utils.parseTransaction(signedEip712Tx); + const parsedEIP712tx = zksync.utils.parseEip712(signedEip712Tx); const eip712TxData = signedTxToTransactionData(parsedEIP712tx)!; const expectedEIP712TxHash = parsedEIP712tx.hash; @@ -217,13 +216,17 @@ describe('System behavior checks', () => { const l2Token = testMaster.environment().erc20Token.l2Address; const l1Token = testMaster.environment().erc20Token.l1Address; - const amount = 1; + const amount = 1n; // Fund Bob's account. 
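Note: the bootloader-utils hash checks above replace the v5 pattern of deleting `tx.from` and re-serializing with `ethers.Transaction.from`, which re-parses the raw signed bytes and exposes both serializations directly. A sketch of the pattern, assuming a raw *signed* transaction as input:

```ts
import * as ethers from 'ethers';

// Sketch: recover both the signed-tx hash and the digest the sender signed
// from raw signed bytes, without mutating the original request object.
function txHashes(rawSignedTx: string): { txHash: string; signedHash: string } {
    const parsed = ethers.Transaction.from(rawSignedTx);
    return {
        txHash: parsed.hash!, // keccak256 of the signed serialization; set because the input is signed
        signedHash: ethers.keccak256(parsed.unsignedSerialized) // the digest that was actually signed
    };
}
```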
await alice.transfer({ amount, to: bob.address, token: l2Token }).then((tx) => tx.wait()); testMaster.reporter.debug('Sent L2 token to Bob'); await alice - .transfer({ amount: L2_DEFAULT_ETH_PER_ACCOUNT.div(8), to: bob.address, token: zksync.utils.ETH_ADDRESS }) + .transfer({ + amount: L2_DEFAULT_ETH_PER_ACCOUNT / 8n, + to: bob.address, + token: zksync.utils.L2_BASE_TOKEN_ADDRESS + }) .then((tx) => tx.wait()); testMaster.reporter.debug('Sent ethereum on L2 to Bob'); @@ -248,9 +251,9 @@ describe('System behavior checks', () => { testMaster.reporter.debug( `Obtained withdrawal receipt for Bob: blockNumber=${bobReceipt.blockNumber}, l1BatchNumber=${bobReceipt.l1BatchNumber}, status=${bobReceipt.status}` ); - await expect(alice.finalizeWithdrawal(aliceReceipt.transactionHash)).toBeAccepted([aliceChange]); + await expect(alice.finalizeWithdrawal(aliceReceipt.hash)).toBeAccepted([aliceChange]); testMaster.reporter.debug('Finalized withdrawal for Alice'); - await expect(alice.finalizeWithdrawal(bobReceipt.transactionHash)).toBeAccepted([bobChange]); + await expect(alice.finalizeWithdrawal(bobReceipt.hash)).toBeAccepted([bobChange]); testMaster.reporter.debug('Finalized withdrawal for Bob'); }); @@ -263,18 +266,20 @@ describe('System behavior checks', () => { const l2Token = testMaster.environment().erc20Token.l2Address; const l1Token = testMaster.environment().erc20Token.l1Address; - const amount = 1; + const amount = 1n; // Prepare matcher modifiers. These modifiers would record the *current* Alice's balance, so after // the first finalization the diff would be (compared to now) `amount`, and after the second -- `amount*2`. const change1 = await shouldChangeTokenBalances(l1Token, [{ wallet: alice, change: amount }], { l1: true }); - const change2 = await shouldChangeTokenBalances(l1Token, [{ wallet: alice, change: amount * 2 }], { l1: true }); + const change2 = await shouldChangeTokenBalances(l1Token, [{ wallet: alice, change: amount * 2n }], { + l1: true + }); testMaster.reporter.debug('Prepared token balance modifiers'); // Maximize chances of including transactions into the same block by first creating both promises // and only then awaiting them. This is still probabilistic though: if this test becomes flaky, // most likely there exists a very big problem in the system. - const nonce = await alice.getTransactionCount(); + const nonce = await alice.getNonce(); testMaster.reporter.debug(`Obtained Alice's nonce: ${nonce}`); const withdrawal1 = alice .withdraw({ token: l2Token, amount, overrides: { nonce } }) @@ -290,18 +295,18 @@ describe('System behavior checks', () => { testMaster.reporter.debug( `Obtained withdrawal receipt #2: blockNumber=${receipt2.blockNumber}, l1BatchNumber=${receipt2.l1BatchNumber}, status=${receipt2.status}` ); - await expect(alice.finalizeWithdrawal(receipt1.transactionHash)).toBeAccepted([change1]); + await expect(alice.finalizeWithdrawal(receipt1.hash)).toBeAccepted([change1]); testMaster.reporter.debug('Finalized withdrawal #1'); - await expect(alice.finalizeWithdrawal(receipt2.transactionHash)).toBeAccepted([change2]); + await expect(alice.finalizeWithdrawal(receipt2.hash)).toBeAccepted([change2]); testMaster.reporter.debug('Finalized withdrawal #2'); }); test('should accept transaction with duplicated factory dep', async () => { const bytecode = contracts.counter.bytecode; // We need some bytecodes that weren't deployed before to test behavior properly. 
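Note: the `finalizeWithdrawal` calls above track a zksync-ethers v6 rename: receipts follow ethers v6 and expose `hash` rather than `transactionHash`. A minimal sketch of the updated finalization flow:

```ts
import * as zksync from 'zksync-ethers';

// Sketch of the v6 flow: the receipt field is now `hash`
// (zksync-ethers v5 receipts exposed `transactionHash`).
async function withdrawAndFinalize(wallet: zksync.Wallet, token: string, amount: bigint) {
    const tx = await wallet.withdraw({ token, amount });
    const receipt = await tx.wait();
    return await wallet.finalizeWithdrawal(receipt.hash);
}
```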
- const dep1 = ethers.utils.hexConcat([bytecode, ethers.utils.randomBytes(64)]); - const dep2 = ethers.utils.hexConcat([bytecode, ethers.utils.randomBytes(64)]); - const dep3 = ethers.utils.hexConcat([bytecode, ethers.utils.randomBytes(64)]); + const dep1 = ethers.concat([bytecode, ethers.randomBytes(64)]); + const dep2 = ethers.concat([bytecode, ethers.randomBytes(64)]); + const dep3 = ethers.concat([bytecode, ethers.randomBytes(64)]); await expect( alice.sendTransaction({ to: alice.address, @@ -320,34 +325,40 @@ describe('System behavior checks', () => { // The current gas per pubdata depends on a lot of factors, so it wouldn't be sustainable to check the exact value. // We'll just check that it is greater than zero. if (testMaster.environment().l1BatchCommitDataGeneratorMode === DataAvailabityMode.Rollup) { - expect(currentGasPerPubdata.toNumber()).toBeGreaterThan(0); + expect(currentGasPerPubdata).toBeGreaterThan(0n); } else { - expect(currentGasPerPubdata.toNumber()).toEqual(0); + expect(currentGasPerPubdata).toEqual(0n); } }); it('should reject transaction with huge gas limit', async () => { - await expect( - alice.sendTransaction({ to: alice.address, gasLimit: ethers.BigNumber.from(2).pow(51) }) - ).toBeRejected('exceeds block gas limit'); + await expect(alice.sendTransaction({ to: alice.address, gasLimit: 2n ** 51n })).toBeRejected( + 'exceeds block gas limit' + ); }); it('Create2Factory should work', async () => { // For simplicity, we'll just deploy a contract factory - const salt = ethers.utils.randomBytes(32); + const salt = ethers.randomBytes(32); const bytecode = await alice.provider.getCode(BUILTIN_CREATE2_FACTORY_ADDRESS); const abi = getTestContract('ICreate2Factory').abi; - const hash = hashBytecode(bytecode); + const hash = zksync.utils.hashBytecode(bytecode); const contractFactory = new ethers.Contract(BUILTIN_CREATE2_FACTORY_ADDRESS, abi, alice); - const deploymentTx = await (await contractFactory.create2(salt, hash, [])).wait(); + const deploymentTx = await (await contractFactory.create2(salt, hash, new Uint8Array(0))).wait(); const deployedAddresses = zksync.utils.getDeployedContracts(deploymentTx); expect(deployedAddresses.length).toEqual(1); const deployedAddress = deployedAddresses[0]; - const correctCreate2Address = zksync.utils.create2Address(contractFactory.address, hash, salt, []); + const contractFactoryAddress = await contractFactory.getAddress(); + const correctCreate2Address = zksync.utils.create2Address( + contractFactoryAddress, + hash, + salt, + new Uint8Array(0) + ); expect(deployedAddress.deployedAddress.toLocaleLowerCase()).toEqual(correctCreate2Address.toLocaleLowerCase()); expect(await alice.provider.getCode(deployedAddress.deployedAddress)).toEqual(bytecode); @@ -359,7 +370,7 @@ describe('System behavior checks', () => { function bootloaderUtilsContract() { const BOOTLOADER_UTILS_ADDRESS = '0x000000000000000000000000000000000000800c'; - const BOOTLOADER_UTILS = new ethers.utils.Interface( + const BOOTLOADER_UTILS = new ethers.Interface( require(`${ testMaster.environment().pathToHome }/contracts/system-contracts/artifacts-zk/contracts-preprocessed/BootloaderUtilities.sol/BootloaderUtilities.json`).abi @@ -390,29 +401,16 @@ export interface TransactionData { // it would allow easier proof integration (in case we will need // some special circuit for preprocessing transactions). 
reserved: BigNumberish[]; - data: BytesLike; - signature: BytesLike; - factoryDeps: BytesLike[]; - paymasterInput: BytesLike; + data: ethers.BytesLike; + signature: ethers.BytesLike; + factoryDeps: ethers.BytesLike[]; + paymasterInput: ethers.BytesLike; // Reserved dynamic type for the future use-case. Using it should be avoided, // But it is still here, just in case we want to enable some additional functionality. - reservedDynamic: BytesLike; + reservedDynamic: ethers.BytesLike; } -function signedTxToTransactionData(tx: ethers.Transaction) { - // Transform legacy transaction's `v` part of the signature - // to a single byte used in the packed eth signature - function unpackV(v: number) { - if (v >= 35) { - const chainId = Math.floor((v - 35) / 2); - return v - chainId * 2 - 8; - } else if (v <= 1) { - return 27 + v; - } - - throw new Error('Invalid `v`'); - } - +function signedTxToTransactionData(tx: ethers.TransactionLike) { function legacyTxToTransactionData(tx: any): TransactionData { return { txType: 0, @@ -427,7 +425,7 @@ function signedTxToTransactionData(tx: ethers.Transaction) { value: tx.value || 0, reserved: [tx.chainId || 0, 0, 0, 0], data: tx.data!, - signature: ethers.utils.hexConcat([tx.r, tx.s, new Uint8Array([unpackV(tx.v)])]), + signature: tx.signature.serialized, factoryDeps: [], paymasterInput: '0x', reservedDynamic: '0x' @@ -448,7 +446,7 @@ function signedTxToTransactionData(tx: ethers.Transaction) { value: tx.value || 0, reserved: [0, 0, 0, 0], data: tx.data!, - signature: ethers.utils.hexConcat([tx.r, tx.s, unpackV(tx.v)]), + signature: tx.signature.serialized, factoryDeps: [], paymasterInput: '0x', reservedDynamic: '0x' @@ -469,7 +467,7 @@ function signedTxToTransactionData(tx: ethers.Transaction) { value: tx.value || 0, reserved: [0, 0, 0, 0], data: tx.data!, - signature: ethers.utils.hexConcat([tx.r, tx.s, unpackV(tx.v)]), + signature: tx.signature.serialized, factoryDeps: [], paymasterInput: '0x', reservedDynamic: '0x' @@ -491,7 +489,7 @@ function signedTxToTransactionData(tx: ethers.Transaction) { reserved: [0, 0, 0, 0], data: tx.data!, signature: tx.customData.customSignature, - factoryDeps: tx.customData.factoryDeps.map(hashBytecode), + factoryDeps: tx.customData.factoryDeps.map(zksync.utils.hashBytecode), paymasterInput: tx.customData.paymasterParams?.paymasterInput || '0x', reservedDynamic: '0x' }; diff --git a/core/tests/ts-integration/tsconfig.json b/core/tests/ts-integration/tsconfig.json index baf2b2d0a791..e8a4c8ca30c4 100644 --- a/core/tests/ts-integration/tsconfig.json +++ b/core/tests/ts-integration/tsconfig.json @@ -1,6 +1,6 @@ { "compilerOptions": { - "target": "es2019", + "target": "es2020", "module": "commonjs", "esModuleInterop": true, "strict": true, diff --git a/core/tests/ts-integration/typings/jest.d.ts b/core/tests/ts-integration/typings/jest.d.ts index 9a15e4516aab..4d8f1c3530c5 100644 --- a/core/tests/ts-integration/typings/jest.d.ts +++ b/core/tests/ts-integration/typings/jest.d.ts @@ -1,4 +1,3 @@ -import { BigNumberish } from 'ethers'; import { MatcherModifier } from '../src/matchers/transaction-modifiers'; export declare global { @@ -17,44 +16,6 @@ export declare global { */ fail(message: string): R; - // BigNumber matchers - - /** - * Checks if initial number is greater than the provided one. - * - * @param r Number to be checked against. - * @param additionalInfo Optional message to be included if test fails. 
- */ - bnToBeGt(r: BigNumberish, additionalInfo?: string): R; - /** - * Checks if initial number is greater than or equal to the provided one. - * - * @param r Number to be checked against. - * @param additionalInfo Optional message to be included if test fails. - */ - bnToBeGte(r: BigNumberish, additionalInfo?: string): R; - /** - * Checks if initial number is equals the provided one. - * - * @param r Number to be checked against. - * @param additionalInfo Optional message to be included if test fails. - */ - bnToBeEq(r: BigNumberish, additionalInfo?: string): R; - /** - * Checks if initial number is less than the provided one. - * - * @param r Number to be checked against. - * @param additionalInfo Optional message to be included if test fails. - */ - bnToBeLt(r: BigNumberish, additionalInfo?: string): R; - /** - * Checks if initial number is less than or equal to the provided one. - * - * @param r Number to be checked against. - * @param additionalInfo Optional message to be included if test fails. - */ - bnToBeLte(r: BigNumberish, additionalInfo?: string): R; - // Ethereum primitives matchers /** diff --git a/core/tests/upgrade-test/package.json b/core/tests/upgrade-test/package.json index 834056b9bcba..5bb23c36d3b8 100644 --- a/core/tests/upgrade-test/package.json +++ b/core/tests/upgrade-test/package.json @@ -23,14 +23,13 @@ "@types/node-fetch": "^2.5.7", "chai": "^4.3.4", "chai-as-promised": "^7.1.1", - "ethereumjs-abi": "^0.6.8", - "ethers": "~5.7.0", + "ethers": "^6.7.1", "mocha": "^9.0.2", "mocha-steps": "^1.3.0", "node-fetch": "^2.6.1", "ts-node": "^10.1.0", "typescript": "^4.3.5", - "zksync-ethers": "5.8.0-beta.5" + "zksync-ethers": "^6.9.0" }, "dependencies": { "prettier": "^2.3.2" diff --git a/core/tests/upgrade-test/tests/tester.ts b/core/tests/upgrade-test/tests/tester.ts index bd92b64eec7f..62bf21cd6949 100644 --- a/core/tests/upgrade-test/tests/tester.ts +++ b/core/tests/upgrade-test/tests/tester.ts @@ -6,10 +6,10 @@ import * as path from 'path'; type Network = string; export class Tester { - public runningFee: Map; + public runningFee: Map; constructor( public network: Network, - public ethProvider: ethers.providers.Provider, + public ethProvider: ethers.Provider, public ethWallet: ethers.Wallet, public syncWallet: zksync.Wallet, public web3Provider: zksync.Provider @@ -19,7 +19,7 @@ export class Tester { // prettier-ignore static async init(network: Network) { - const ethProvider = new ethers.providers.JsonRpcProvider(process.env.L1_RPC_ADDRESS || process.env.ETH_CLIENT_WEB3_URL); + const ethProvider = new ethers.JsonRpcProvider(process.env.L1_RPC_ADDRESS || process.env.ETH_CLIENT_WEB3_URL); let ethWallet; if (network == 'localhost') { @@ -27,10 +27,11 @@ export class Tester { const testConfigPath = path.join(process.env.ZKSYNC_HOME!, `etc/test_config/constant`); const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); - ethWallet = ethers.Wallet.fromMnemonic( - ethTestConfig.test_mnemonic as string, + const ethWalletHD = ethers.HDNodeWallet.fromMnemonic( + ethers.Mnemonic.fromPhrase(ethTestConfig.test_mnemonic), "m/44'/60'/0'/0/0" - ) + ); + ethWallet = new ethers.Wallet(ethWalletHD.privateKey, ethProvider); } else { ethWallet = new ethers.Wallet(process.env.MASTER_WALLET_PK!); @@ -43,16 +44,16 @@ export class Tester { // Since some tx may be pending on stage, we don't want to get stuck because of it. // In order to not get stuck transactions, we manually cancel all the pending txs. 
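Note: `Tester.init` above migrates to the ethers v6 mnemonic API: `Wallet.fromMnemonic` no longer exists, so the key is derived through `HDNodeWallet` and then rewrapped as a plain `Wallet` so it can be bound to a provider. A condensed sketch of the pattern:

```ts
import * as ethers from 'ethers';

// Sketch of the v6 derivation used above: HDNodeWallet derives the key,
// and a plain Wallet rewraps the private key together with a provider.
function walletFromMnemonic(phrase: string, path: string, provider: ethers.Provider): ethers.Wallet {
    const hd = ethers.HDNodeWallet.fromMnemonic(ethers.Mnemonic.fromPhrase(phrase), path);
    return new ethers.Wallet(hd.privateKey, provider);
}
```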
- const latestNonce = await ethWallet.getTransactionCount('latest'); - const pendingNonce = await ethWallet.getTransactionCount('pending'); + const latestNonce = await ethWallet.getNonce('latest'); + const pendingNonce = await ethWallet.getNonce('pending'); const cancellationTxs = []; for (let nonce = latestNonce; nonce != pendingNonce; nonce++) { // For each transaction to override it, we need to provide greater fee. // We would manually provide a value high enough (for a testnet) to be both valid // and higher than the previous one. It's OK as we'll only be charged for the bass fee // anyways. We will also set the miner's tip to 5 gwei, which is also much higher than the normal one. - const maxFeePerGas = ethers.utils.parseEther("0.00000025"); // 250 gwei - const maxPriorityFeePerGas = ethers.utils.parseEther("0.000000005"); // 5 gwei + const maxFeePerGas = ethers.parseEther("0.00000025"); // 250 gwei + const maxPriorityFeePerGas = ethers.parseEther("0.000000005"); // 5 gwei cancellationTxs.push(ethWallet.sendTransaction({ to: ethWallet.address, nonce, maxFeePerGas, maxPriorityFeePerGas }).then((tx) => tx.wait())); } if (cancellationTxs.length > 0) { @@ -64,6 +65,7 @@ export class Tester { } emptyWallet() { - return zksync.Wallet.createRandom().connect(this.web3Provider).connectToL1(this.ethProvider); + const walletHD = zksync.Wallet.createRandom(); + return new zksync.Wallet(walletHD.privateKey, this.web3Provider, this.ethProvider); } } diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index 2da6acab18e6..b111d6019b67 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -1,43 +1,50 @@ import * as utils from 'utils'; import { Tester } from './tester'; import * as zksync from 'zksync-ethers'; -import { BigNumber, BigNumberish, ethers } from 'ethers'; +import * as ethers from 'ethers'; import { expect } from 'chai'; import fs from 'fs'; import { BytesLike } from '@ethersproject/bytes'; +import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain'; +import { BigNumberish } from 'ethers'; const L1_CONTRACTS_FOLDER = `${process.env.ZKSYNC_HOME}/contracts/l1-contracts/artifacts/contracts`; -const L1_DEFAULT_UPGRADE_ABI = new ethers.utils.Interface( +const L1_DEFAULT_UPGRADE_ABI = new ethers.Interface( require(`${L1_CONTRACTS_FOLDER}/upgrades/DefaultUpgrade.sol/DefaultUpgrade.json`).abi ); -const GOVERNANCE_ABI = new ethers.utils.Interface( +const GOVERNANCE_ABI = new ethers.Interface( require(`${L1_CONTRACTS_FOLDER}/governance/Governance.sol/Governance.json`).abi ); -const ADMIN_FACET_ABI = new ethers.utils.Interface( +const ADMIN_FACET_ABI = new ethers.Interface( require(`${L1_CONTRACTS_FOLDER}/state-transition/chain-interfaces/IAdmin.sol/IAdmin.json`).abi ); -const L2_FORCE_DEPLOY_UPGRADER_ABI = new ethers.utils.Interface( +const CHAIN_ADMIN_ABI = new ethers.Interface( + require(`${L1_CONTRACTS_FOLDER}/governance/ChainAdmin.sol/ChainAdmin.json`).abi +); +const L2_FORCE_DEPLOY_UPGRADER_ABI = new ethers.Interface( require(`${process.env.ZKSYNC_HOME}/contracts/l2-contracts/artifacts-zk/contracts/ForceDeployUpgrader.sol/ForceDeployUpgrader.json`).abi ); -const COMPLEX_UPGRADER_ABI = new ethers.utils.Interface( +const COMPLEX_UPGRADER_ABI = new ethers.Interface( require(`${process.env.ZKSYNC_HOME}/contracts/system-contracts/artifacts-zk/contracts-preprocessed/ComplexUpgrader.sol/ComplexUpgrader.json`).abi ); const COUNTER_BYTECODE = 
require(`${process.env.ZKSYNC_HOME}/core/tests/ts-integration/artifacts-zk/contracts/counter/counter.sol/Counter.json`).deployedBytecode; -const STATE_TRANSITON_MANAGER = new ethers.utils.Interface( +const STATE_TRANSITON_MANAGER = new ethers.Interface( require(`${L1_CONTRACTS_FOLDER}/state-transition/StateTransitionManager.sol/StateTransitionManager.json`).abi ); -const depositAmount = ethers.utils.parseEther('0.001'); +let serverComponents = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher'; + +const depositAmount = ethers.parseEther('0.001'); describe('Upgrade test', function () { let tester: Tester; let alice: zksync.Wallet; let govWallet: ethers.Wallet; - let mainContract: ethers.Contract; + let mainContract: IZkSyncHyperchain; let governanceContract: ethers.Contract; + let chainAdminContract: ethers.Contract; let bootloaderHash: string; - let scheduleTransparentOperation: string; let executeOperation: string; let forceDeployAddress: string; let forceDeployBytecode: string; @@ -48,8 +55,11 @@ describe('Upgrade test', function () { alice = tester.emptyWallet(); logs = fs.createWriteStream('upgrade.log', { flags: 'a' }); - const govMnemonic = require('../../../../etc/test_config/constant/eth.json').mnemonic; - govWallet = ethers.Wallet.fromMnemonic(govMnemonic, "m/44'/60'/0'/0/1").connect(alice._providerL1()); + const govMnemonic = ethers.Mnemonic.fromPhrase( + require('../../../../etc/test_config/constant/eth.json').mnemonic + ); + const govWalletHD = ethers.HDNodeWallet.fromMnemonic(govMnemonic, "m/44'/60'/0'/0/1"); + govWallet = new ethers.Wallet(govWalletHD.privateKey, alice._providerL1()); }); step('Run server and execute some transactions', async () => { @@ -67,10 +77,10 @@ describe('Upgrade test', function () { // Must be > 1s, because bootloader requires l1 batch timestamps to be incremental. process.env.CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS = '2000'; // Run server in background. - utils.background( - 'cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=api,tree,eth,state_keeper,commitment_generator', - [null, logs, logs] - ); + utils.background({ + command: `cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=${serverComponents}`, + stdio: [null, logs, logs] + }); // Server may need some time to recompile if it's a cold run, so wait for it. 
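Note: every ABI wrapper above changes only in where `Interface` lives, since ethers v6 hoists it from `ethers.utils` to the top level. A small self-contained sketch (the ABI fragment is illustrative):

```ts
import * as ethers from 'ethers';

// v5: new ethers.utils.Interface(abi)  ->  v6: new ethers.Interface(abi)
const iface = new ethers.Interface(['function upgrade(address to, bytes data)']);
const calldata = iface.encodeFunctionData('upgrade', [ethers.ZeroAddress, '0x']);
```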
let iter = 0; while (iter < 30 && !mainContract) { @@ -89,6 +99,8 @@ describe('Upgrade test', function () { const stmContract = new ethers.Contract(stmAddr, STATE_TRANSITON_MANAGER, tester.syncWallet.providerL1); const governanceAddr = await stmContract.owner(); governanceContract = new ethers.Contract(governanceAddr, GOVERNANCE_ABI, tester.syncWallet.providerL1); + const chainAdminAddr = await mainContract.getAdmin(); + chainAdminContract = new ethers.Contract(chainAdminAddr, CHAIN_ADMIN_ABI, tester.syncWallet.providerL1); let blocksCommitted = await mainContract.getTotalBatchesCommitted(); const initialL1BatchNumber = await tester.web3Provider.getL1BatchNumber(); @@ -96,8 +108,8 @@ describe('Upgrade test', function () { const baseToken = await tester.syncWallet.provider.getBaseTokenContractAddress(); if (!zksync.utils.isAddressEq(baseToken, zksync.utils.ETH_ADDRESS_IN_CONTRACTS)) { - await (await tester.syncWallet.approveERC20(baseToken, ethers.constants.MaxUint256)).wait(); - await mintToWallet(baseToken, tester.syncWallet, depositAmount.mul(10)); + await (await tester.syncWallet.approveERC20(baseToken, ethers.MaxUint256)).wait(); + await mintToWallet(baseToken, tester.syncWallet, depositAmount * 10n); } const firstDepositHandle = await tester.syncWallet.deposit({ @@ -121,20 +133,20 @@ describe('Upgrade test', function () { } const balance = await alice.getBalance(); - expect(balance.eq(depositAmount.mul(2)), 'Incorrect balance after deposits').to.be.true; + expect(balance === depositAmount * 2n, 'Incorrect balance after deposits').to.be.true; if (process.env.CHECK_EN_URL) { console.log('Checking EN after deposit'); await utils.sleep(2); - const enProvider = new ethers.providers.JsonRpcProvider(process.env.CHECK_EN_URL); + const enProvider = new ethers.JsonRpcProvider(process.env.CHECK_EN_URL); const enBalance = await enProvider.getBalance(alice.address); - expect(enBalance.eq(balance), 'Failed to update the balance on EN after deposit').to.be.true; + expect(enBalance === balance, 'Failed to update the balance on EN after deposit').to.be.true; } // Wait for at least one new committed block let newBlocksCommitted = await mainContract.getTotalBatchesCommitted(); let tryCount = 0; - while (blocksCommitted.eq(newBlocksCommitted) && tryCount < 30) { + while (blocksCommitted === newBlocksCommitted && tryCount < 30) { newBlocksCommitted = await mainContract.getTotalBatchesCommitted(); tryCount += 1; await utils.sleep(1); @@ -143,10 +155,10 @@ describe('Upgrade test', function () { step('Send l1 tx for saving new bootloader', async () => { const path = `${process.env.ZKSYNC_HOME}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin`; - const bootloaderCode = ethers.utils.hexlify(fs.readFileSync(path)); - bootloaderHash = ethers.utils.hexlify(zksync.utils.hashBytecode(bootloaderCode)); + const bootloaderCode = ethers.hexlify(fs.readFileSync(path)); + bootloaderHash = ethers.hexlify(zksync.utils.hashBytecode(bootloaderCode)); const txHandle = await tester.syncWallet.requestExecute({ - contractAddress: ethers.constants.AddressZero, + contractAddress: ethers.ZeroAddress, calldata: '0x', l2GasLimit: 20000000, factoryDeps: [bootloaderCode], @@ -163,10 +175,10 @@ describe('Upgrade test', function () { forceDeployBytecode = COUNTER_BYTECODE; const forceDeployment: ForceDeployment = { - bytecodeHash: zksync.utils.hashBytecode(forceDeployBytecode), + bytecodeHash: ethers.hexlify(zksync.utils.hashBytecode(forceDeployBytecode)), newAddress: forceDeployAddress, callConstructor: 
false, - value: BigNumber.from(0), + value: 0n, input: '0x' }; @@ -174,10 +186,9 @@ describe('Upgrade test', function () { const delegateCalldata = L2_FORCE_DEPLOY_UPGRADER_ABI.encodeFunctionData('forceDeploy', [[forceDeployment]]); const data = COMPLEX_UPGRADER_ABI.encodeFunctionData('upgrade', [delegateTo, delegateCalldata]); - const { stmUpgradeData, chainUpgradeData } = await prepareUpgradeCalldata( + const { stmUpgradeData, chainUpgradeCalldata, setTimestampCalldata } = await prepareUpgradeCalldata( govWallet, alice._providerL2(), - mainContract.address, { l2ProtocolUpgradeTx: { txType: 254, @@ -192,7 +203,7 @@ describe('Upgrade test', function () { reserved: [0, 0, 0, 0], data, signature: '0x', - factoryDeps: [zksync.utils.hashBytecode(forceDeployBytecode)], + factoryDeps: [ethers.hexlify(zksync.utils.hashBytecode(forceDeployBytecode))], paymasterInput: '0x', reservedDynamic: '0x' }, @@ -201,12 +212,12 @@ describe('Upgrade test', function () { upgradeTimestamp: 0 } ); - scheduleTransparentOperation = chainUpgradeData.scheduleTransparentOperation; - executeOperation = chainUpgradeData.executeOperation; + executeOperation = chainUpgradeCalldata; await sendGovernanceOperation(stmUpgradeData.scheduleTransparentOperation); await sendGovernanceOperation(stmUpgradeData.executeOperation); - await sendGovernanceOperation(scheduleTransparentOperation); + + await sendChainAdminOperation(setTimestampCalldata); // Wait for server to process L1 event. await utils.sleep(2); @@ -214,7 +225,7 @@ describe('Upgrade test', function () { step('Check bootloader is updated on L2', async () => { const receipt = await waitForNewL1Batch(alice); - const batchDetails = await alice.provider.getL1BatchDetails(receipt.l1BatchNumber); + const batchDetails = await alice.provider.getL1BatchDetails(receipt.l1BatchNumber!); expect(batchDetails.baseSystemContractsHashes.bootloader).to.eq(bootloaderHash); }); @@ -240,7 +251,11 @@ describe('Upgrade test', function () { } // Execute the upgrade - await sendGovernanceOperation(executeOperation); + const executeMulticallData = chainAdminContract.interface.encodeFunctionData('multicall', [ + [[await mainContract.getAddress(), 0, executeOperation]], + true + ]); + await sendChainAdminOperation(executeMulticallData); let bootloaderHashL1 = await mainContract.getL2BootloaderBytecodeHash(); expect(bootloaderHashL1).eq(bootloaderHash); @@ -248,7 +263,7 @@ describe('Upgrade test', function () { step('Wait for block finalization', async () => { // Execute an L2 transaction - const txHandle = await checkedRandomTransfer(alice, BigNumber.from(1)); + const txHandle = await checkedRandomTransfer(alice, 1n); await txHandle.waitFinalize(); }); @@ -263,14 +278,14 @@ describe('Upgrade test', function () { await utils.sleep(10); // Run again. 
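Note: the execute step above is now routed through the chain admin's `multicall` instead of a direct governance call: the upgrade calldata is wrapped as a single `(target, value, data)` tuple with `requireSuccess = true`. A sketch of the encoding; the human-readable ABI fragment mirrors the usage above and is an assumption, the real one comes from `ChainAdmin.json`:

```ts
import * as ethers from 'ethers';

// Assumed ABI fragment: each entry is a (target, value, data) tuple and the
// trailing boolean demands that every call in the batch succeeds.
const CHAIN_ADMIN_IFACE = new ethers.Interface([
    'function multicall((address target, uint256 value, bytes data)[] calls, bool requireSuccess)'
]);

function encodeExecuteViaChainAdmin(target: string, executeOperation: string): string {
    return CHAIN_ADMIN_IFACE.encodeFunctionData('multicall', [[[target, 0n, executeOperation]], true]);
}
```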
- utils.background( - 'cd $ZKSYNC_HOME && zk f cargo run --bin zksync_server --release -- --components=api,tree,eth,state_keeper,commitment_generator &> upgrade.log', - [null, logs, logs] - ); + utils.background({ + command: `cd $ZKSYNC_HOME && zk f cargo run --bin zksync_server --release -- --components=${serverComponents} &> upgrade.log`, + stdio: [null, logs, logs] + }); await utils.sleep(10); // Trying to send a transaction from the same address again - await checkedRandomTransfer(alice, BigNumber.from(1)); + await checkedRandomTransfer(alice, 1n); }); after('Try killing server', async () => { @@ -282,7 +297,17 @@ describe('Upgrade test', function () { async function sendGovernanceOperation(data: string) { await ( await govWallet.sendTransaction({ - to: governanceContract.address, + to: await governanceContract.getAddress(), + data: data, + type: 0 + }) + ).wait(); + } + + async function sendChainAdminOperation(data: string) { + await ( + await govWallet.sendTransaction({ + to: await chainAdminContract.getAddress(), data: data, type: 0 }) @@ -290,12 +315,10 @@ describe('Upgrade test', function () { } }); -async function checkedRandomTransfer( - sender: zksync.Wallet, - amount: BigNumber -): Promise { +async function checkedRandomTransfer(sender: zksync.Wallet, amount: bigint): Promise { const senderBalanceBefore = await sender.getBalance(); - const receiver = zksync.Wallet.createRandom().connect(sender.provider); + const receiverHD = zksync.Wallet.createRandom(); + const receiver = new zksync.Wallet(receiverHD.privateKey, sender.provider); const transferHandle = await sender.sendTransaction({ to: receiver.address, value: amount, @@ -306,18 +329,18 @@ async function checkedRandomTransfer( const senderBalanceAfter = await sender.getBalance(); const receiverBalanceAfter = await receiver.getBalance(); - expect(receiverBalanceAfter.eq(amount), 'Failed updated the balance of the receiver').to.be.true; + expect(receiverBalanceAfter === amount, 'Failed updated the balance of the receiver').to.be.true; - const spentAmount = txReceipt.gasUsed.mul(transferHandle.gasPrice!).add(amount); - expect(senderBalanceAfter.add(spentAmount).gte(senderBalanceBefore), 'Failed to update the balance of the sender') - .to.be.true; + const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! + amount; + expect(senderBalanceAfter + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender').to.be + .true; if (process.env.CHECK_EN_URL) { console.log('Checking EN after transfer'); await utils.sleep(2); - const enProvider = new ethers.providers.JsonRpcProvider(process.env.CHECK_EN_URL); + const enProvider = new ethers.JsonRpcProvider(process.env.CHECK_EN_URL); const enSenderBalance = await enProvider.getBalance(sender.address); - expect(enSenderBalance.eq(senderBalanceAfter), 'Failed to update the balance of the sender on EN').to.be.true; + expect(enSenderBalance === senderBalanceAfter, 'Failed to update the balance of the sender on EN').to.be.true; } return transferHandle; @@ -331,7 +354,7 @@ interface ForceDeployment { // Whether to call the constructor callConstructor: boolean; // The value with which to initialize a contract - value: BigNumber; + value: bigint; // The constructor calldata input: BytesLike; } @@ -340,16 +363,19 @@ async function waitForNewL1Batch(wallet: zksync.Wallet): Promise tx.wait()); // Invariant: even with 1 transaction, l1 batch must be eventually sealed, so this loop must exit. 
- while (!(await wallet.provider.getTransactionReceipt(oldReceipt.transactionHash)).l1BatchNumber) { + while (!(await wallet.provider.getTransactionReceipt(oldReceipt.hash))!.l1BatchNumber) { await zksync.utils.sleep(wallet.provider.pollingInterval); } - return await wallet.provider.getTransactionReceipt(oldReceipt.transactionHash); + const receipt = await wallet.provider.getTransactionReceipt(oldReceipt.hash); + if (!receipt) { + throw new Error('Failed to get the receipt of the transaction'); + } + return receipt; } async function prepareUpgradeCalldata( govWallet: ethers.Wallet, l2Provider: zksync.Provider, - mainContract: zksync.types.Address, params: { l2ProtocolUpgradeTx: { txType: BigNumberish; @@ -393,18 +419,18 @@ async function prepareUpgradeCalldata( const zksyncContract = new ethers.Contract(zksyncAddress, zksync.utils.ZKSYNC_MAIN_ABI, govWallet); const stmAddress = await zksyncContract.getStateTransitionManager(); - const oldProtocolVersion = await zksyncContract.getProtocolVersion(); + const oldProtocolVersion = Number(await zksyncContract.getProtocolVersion()); const newProtocolVersion = addToProtocolVersion(oldProtocolVersion, 1, 1); - params.l2ProtocolUpgradeTx.nonce ??= unpackNumberSemVer(newProtocolVersion)[1]; + params.l2ProtocolUpgradeTx.nonce ??= BigInt(unpackNumberSemVer(newProtocolVersion)[1]); const upgradeInitData = L1_DEFAULT_UPGRADE_ABI.encodeFunctionData('upgrade', [ [ params.l2ProtocolUpgradeTx, params.factoryDeps, - params.bootloaderHash ?? ethers.constants.HashZero, - params.defaultAAHash ?? ethers.constants.HashZero, - params.verifier ?? ethers.constants.AddressZero, - params.verifierParams ?? [ethers.constants.HashZero, ethers.constants.HashZero, ethers.constants.HashZero], + params.bootloaderHash ?? ethers.ZeroHash, + params.defaultAAHash ?? ethers.ZeroHash, + params.verifier ?? ethers.ZeroAddress, + params.verifierParams ?? [ethers.ZeroHash, ethers.ZeroHash, ethers.ZeroHash], params.l1ContractsUpgradeCalldata ?? '0x', params.postUpgradeCalldata ?? '0x', params.upgradeTimestamp, @@ -424,7 +450,7 @@ async function prepareUpgradeCalldata( upgradeParam, oldProtocolVersion, // The protocol version will not have any deadline in this upgrade - ethers.constants.MaxUint256, + ethers.MaxUint256, newProtocolVersion ]); @@ -433,13 +459,18 @@ async function prepareUpgradeCalldata( oldProtocolVersion, upgradeParam ]); + // Set timestamp for upgrade on a specific chain under this STM. 
+ const setTimestampCalldata = CHAIN_ADMIN_ABI.encodeFunctionData('setUpgradeTimestamp', [ + newProtocolVersion, + params.upgradeTimestamp + ]); const stmUpgradeData = prepareGovernanceCalldata(stmAddress, stmUpgradeCalldata); - const chainUpgradeData = prepareGovernanceCalldata(mainContract, chainUpgradeCalldata); return { - chainUpgradeData, - stmUpgradeData + stmUpgradeData, + chainUpgradeCalldata, + setTimestampCalldata }; } @@ -456,8 +487,8 @@ function prepareGovernanceCalldata(to: string, data: BytesLike): UpgradeCalldata }; const governanceOperation = { calls: [call], - predecessor: ethers.constants.HashZero, - salt: ethers.constants.HashZero + predecessor: ethers.ZeroHash, + salt: ethers.ZeroHash }; // Get transaction data of the `scheduleTransparent` @@ -475,11 +506,7 @@ function prepareGovernanceCalldata(to: string, data: BytesLike): UpgradeCalldata }; } -async function mintToWallet( - baseTokenAddress: zksync.types.Address, - ethersWallet: ethers.Wallet, - amountToMint: ethers.BigNumber -) { +async function mintToWallet(baseTokenAddress: zksync.types.Address, ethersWallet: ethers.Wallet, amountToMint: bigint) { const l1Erc20ABI = ['function mint(address to, uint256 amount)']; const l1Erc20Contract = new ethers.Contract(baseTokenAddress, l1Erc20ABI, ethersWallet); await (await l1Erc20Contract.mint(ethersWallet.address, amountToMint)).wait(); diff --git a/core/tests/upgrade-test/tsconfig.json b/core/tests/upgrade-test/tsconfig.json index 6c8907a86016..3de8e1a1c606 100644 --- a/core/tests/upgrade-test/tsconfig.json +++ b/core/tests/upgrade-test/tsconfig.json @@ -1,6 +1,6 @@ { "compilerOptions": { - "target": "es2019", + "target": "es2020", "module": "commonjs", "strict": true, "esModuleInterop": true, diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 01905fac3237..efbc08a957a6 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -3,10 +3,11 @@ name = "vm-benchmark" version = "0.1.0" edition.workspace = true license.workspace = true +publish = false [dependencies] -vm-benchmark-harness.workspace = true -metrics-exporter-prometheus.workspace = true +zksync_vm_benchmark_harness.workspace = true +zksync_vlog.workspace = true vise.workspace = true tokio.workspace = true diff --git a/core/tests/vm-benchmark/benches/criterion.rs b/core/tests/vm-benchmark/benches/criterion.rs index c6c81d723365..5becccfab801 100644 --- a/core/tests/vm-benchmark/benches/criterion.rs +++ b/core/tests/vm-benchmark/benches/criterion.rs @@ -1,5 +1,5 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; +use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; fn benches_in_folder(c: &mut Criterion) { for path in std::fs::read_dir("deployment_benchmarks").unwrap() { diff --git a/core/tests/vm-benchmark/benches/diy_benchmark.rs b/core/tests/vm-benchmark/benches/diy_benchmark.rs index c1c627cbcb4a..1601de5eb85f 100644 --- a/core/tests/vm-benchmark/benches/diy_benchmark.rs +++ b/core/tests/vm-benchmark/benches/diy_benchmark.rs @@ -2,7 +2,7 @@ use std::time::{Duration, Instant}; use criterion::black_box; use vise::{Gauge, LabeledFamily, Metrics}; -use vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; +use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; fn main() { let mut results = vec![]; diff --git 
a/core/tests/vm-benchmark/benches/fill_bootloader.rs b/core/tests/vm-benchmark/benches/fill_bootloader.rs index 40e6727cb4ba..7bc8dab9974a 100644 --- a/core/tests/vm-benchmark/benches/fill_bootloader.rs +++ b/core/tests/vm-benchmark/benches/fill_bootloader.rs @@ -1,7 +1,7 @@ use std::time::Instant; use criterion::black_box; -use vm_benchmark_harness::{ +use zksync_vm_benchmark_harness::{ cut_to_allowed_bytecode_size, get_deploy_tx_with_gas_limit, BenchmarkingVm, }; diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs index a7c8a9c2ecd4..f0ba43f26853 100644 --- a/core/tests/vm-benchmark/benches/iai.rs +++ b/core/tests/vm-benchmark/benches/iai.rs @@ -1,5 +1,5 @@ use iai::black_box; -use vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; +use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; fn run_bytecode(path: &str) { let test_contract = std::fs::read(path).expect("failed to read file"); diff --git a/core/tests/vm-benchmark/harness/Cargo.toml b/core/tests/vm-benchmark/harness/Cargo.toml index da786ee391b1..acd5f37cbc7b 100644 --- a/core/tests/vm-benchmark/harness/Cargo.toml +++ b/core/tests/vm-benchmark/harness/Cargo.toml @@ -1,11 +1,12 @@ [package] -name = "vm-benchmark-harness" +name = "zksync_vm_benchmark_harness" version.workspace = true edition.workspace = true license.workspace = true +publish = false [dependencies] -multivm.workspace = true +zksync_multivm.workspace = true zksync_types.workspace = true zksync_state.workspace = true zksync_utils.workspace = true diff --git a/core/tests/vm-benchmark/harness/src/instruction_counter.rs b/core/tests/vm-benchmark/harness/src/instruction_counter.rs index 8ab861c56ae8..017b13da44ca 100644 --- a/core/tests/vm-benchmark/harness/src/instruction_counter.rs +++ b/core/tests/vm-benchmark/harness/src/instruction_counter.rs @@ -1,6 +1,6 @@ use std::{cell::RefCell, rc::Rc}; -use multivm::{ +use zksync_multivm::{ interface::{dyn_tracers::vm_1_5_0::DynTracer, tracer::TracerExecutionStatus}, vm_latest::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}, }; @@ -13,6 +13,7 @@ pub struct InstructionCounter { /// A tracer that counts the number of instructions executed by the VM. 
impl InstructionCounter { + #[allow(dead_code)] // FIXME pub fn new(output: Rc<RefCell<usize>>) -> Self { Self { count: 0, output } } @@ -32,7 +33,7 @@ impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for InstructionCounter { &mut self, _state: &mut ZkSyncVmState<S, H>, _bootloader_state: &BootloaderState, - _stop_reason: multivm::interface::tracer::VmExecutionStopReason, + _stop_reason: zksync_multivm::interface::tracer::VmExecutionStopReason, ) { *self.output.borrow_mut() = self.count; } diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/harness/src/lib.rs index 8218b585d502..a30221cfa0be 100644 --- a/core/tests/vm-benchmark/harness/src/lib.rs +++ b/core/tests/vm-benchmark/harness/src/lib.rs @@ -1,6 +1,8 @@ use std::{cell::RefCell, rc::Rc}; -use multivm::{ +use once_cell::sync::Lazy; +use zksync_contracts::{deployer_contract, BaseSystemContracts}; +use zksync_multivm::{ interface::{ L2BlockEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, }, @@ -8,8 +10,6 @@ use multivm::{ vm_fast::Vm, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; -use once_cell::sync::Lazy; -use zksync_contracts::{deployer_contract, BaseSystemContracts}; use zksync_state::InMemoryStorage; use zksync_types::{ block::L2BlockHasher, @@ -70,7 +70,7 @@ impl BenchmarkingVm { let timestamp = unix_timestamp_ms(); Self(Vm::new( - multivm::interface::L1BatchEnv { + zksync_multivm::interface::L1BatchEnv { previous_batch_hash: None, number: L1BatchNumber(1), timestamp, @@ -87,7 +87,7 @@ impl BenchmarkingVm { max_virtual_blocks_to_create: 100, }, }, - multivm::interface::SystemEnv { + zksync_multivm::interface::SystemEnv { zk_porter_available: false, version: ProtocolVersionId::latest(), base_system_smart_contracts: SYSTEM_CONTRACTS.clone(), @@ -173,7 +173,7 @@ mod tests { assert!(matches!( res.result, - multivm::interface::ExecutionResult::Success { .. } + zksync_multivm::interface::ExecutionResult::Success { ..
} )); } } diff --git a/core/tests/vm-benchmark/src/find_slowest.rs b/core/tests/vm-benchmark/src/find_slowest.rs index 2bc2a894d2d0..97a6acd5acd9 100644 --- a/core/tests/vm-benchmark/src/find_slowest.rs +++ b/core/tests/vm-benchmark/src/find_slowest.rs @@ -3,7 +3,7 @@ use std::{ time::{Duration, Instant}, }; -use vm_benchmark_harness::*; +use zksync_vm_benchmark_harness::*; fn main() { let mut results = vec![]; diff --git a/core/tests/vm-benchmark/src/instruction_counts.rs b/core/tests/vm-benchmark/src/instruction_counts.rs index a80d8a7ffd67..c038c8f2bf6b 100644 --- a/core/tests/vm-benchmark/src/instruction_counts.rs +++ b/core/tests/vm-benchmark/src/instruction_counts.rs @@ -2,7 +2,7 @@ use std::path::Path; -use vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; +use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; fn main() { // using source file location because this is just a script, the binary isn't meant to be reused diff --git a/core/tests/vm-benchmark/src/main.rs b/core/tests/vm-benchmark/src/main.rs index 99105e078ea7..925ec78ceb3c 100644 --- a/core/tests/vm-benchmark/src/main.rs +++ b/core/tests/vm-benchmark/src/main.rs @@ -1,4 +1,4 @@ -use vm_benchmark_harness::*; +use zksync_vm_benchmark_harness::*; fn main() { let test_contract = std::fs::read( diff --git a/core/tests/vm-benchmark/src/with_prometheus.rs b/core/tests/vm-benchmark/src/with_prometheus.rs index 1fcf5652c6dd..f9b79adedc09 100644 --- a/core/tests/vm-benchmark/src/with_prometheus.rs +++ b/core/tests/vm-benchmark/src/with_prometheus.rs @@ -1,23 +1,27 @@ use std::time::Duration; -use metrics_exporter_prometheus::PrometheusBuilder; +use tokio::sync::watch; +use zksync_vlog::prometheus::PrometheusExporterConfig; pub fn with_prometheus<F: FnOnce()>(f: F) { + tokio::runtime::Runtime::new() + .unwrap() + .block_on(with_prometheus_async(f)); +} + +async fn with_prometheus_async<F: FnOnce()>(f: F) { println!("Pushing results to Prometheus"); let endpoint = "http://vmagent.stage.matterlabs.corp/api/v1/import/prometheus/metrics/job/vm-benchmark"; + let (stop_sender, stop_receiver) = watch::channel(false); + let prometheus_config = + PrometheusExporterConfig::push(endpoint.to_owned(), Duration::from_millis(100)); + tokio::spawn(prometheus_config.run(stop_receiver)); - tokio::runtime::Runtime::new().unwrap().block_on(async { - PrometheusBuilder::new() - .with_push_gateway(endpoint, Duration::from_millis(100), None, None) - .unwrap() - .install() - .unwrap(); - - f(); + f(); - println!("Waiting for push to happen..."); - tokio::time::sleep(Duration::from_secs(1)).await; - }); + println!("Waiting for push to happen..."); + tokio::time::sleep(Duration::from_secs(1)).await; + stop_sender.send_replace(true); } diff --git a/deny.toml b/deny.toml index b50b165b72f5..1e4a30ad6231 100644 --- a/deny.toml +++ b/deny.toml @@ -6,9 +6,7 @@ vulnerability = "deny" unmaintained = "warn" yanked = "warn" notice = "warn" -ignore = [ - "RUSTSEC-2023-0018", -] +ignore = [] [licenses] unlicensed = "deny" @@ -23,13 +21,14 @@ allow = [ "BSD-2-Clause", "BSD-3-Clause", "Zlib", + "OpenSSL", ] copyleft = "warn" allow-osi-fsf-free = "neither" default = "deny" confidence-threshold = 0.8 exceptions = [ - { name = "ring", allow = ["OpenSSL"] }, + { name = "ring", allow = ["OpenSSL"] }, ] unused-allowed-license = "allow" diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index c0466f348a6f..736409bac5fe 100644 --- a/docker/contract-verifier/Dockerfile +++
b/docker/contract-verifier/Dockerfile @@ -34,7 +34,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 0); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 1); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ @@ -54,6 +54,13 @@ RUN for VERSION in $(seq -f "v1.4.%g" 0 1); do \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ done +# install zkvyper 1.5.x +RUN for VERSION in $(seq -f "v1.5.%g" 0 2); do \ + mkdir -p /etc/zkvyper-bin/$VERSION && \ + wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ + chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ + done + # install solc COPY docker/contract-verifier/install-all-solc.sh install-all-solc.sh RUN bash ./install-all-solc.sh diff --git a/docker/local-node/Dockerfile b/docker/local-node/Dockerfile index c0592f89d563..2e6b09ef3d10 100644 --- a/docker/local-node/Dockerfile +++ b/docker/local-node/Dockerfile @@ -64,7 +64,7 @@ COPY package.json / # Copy DAL - needed to setup database schemas. COPY core/lib/dal core/lib/dal -COPY prover/prover_dal prover/prover_dal +COPY prover/crates/lib/prover_dal prover/crates/lib/prover_dal RUN mkdir /etc/env/l1-inits && mkdir /etc/env/l2-inits diff --git a/docker/proof-fri-compressor/Dockerfile b/docker/proof-fri-compressor/Dockerfile index afa8477dcf72..2cf131abb4b8 100644 --- a/docker/proof-fri-compressor/Dockerfile +++ b/docker/proof-fri-compressor/Dockerfile @@ -14,7 +14,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy VK required for proof wrapping -COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ +COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ # copy universal setup key required for proof compression COPY setup_2\^26.key /setup_2\^26.key diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile index 8249f123081b..e6d2e0f11627 100644 --- a/docker/proof-fri-gpu-compressor/Dockerfile +++ b/docker/proof-fri-gpu-compressor/Dockerfile @@ -37,7 +37,7 @@ FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy VK required for proof wrapping -COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ +COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ COPY setup_2\^24.key /setup_2\^24.key diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index f5dfa027b418..c53f27818687 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -11,7 +11,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy VK required for proof wrapping -COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ +COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ 
/prover/crates/bin/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_fri_gateway /usr/bin/ diff --git a/docker/prover-fri/Dockerfile b/docker/prover-fri/Dockerfile index 98a0d2d831dc..2dde8d9794ce 100644 --- a/docker/prover-fri/Dockerfile +++ b/docker/prover-fri/Dockerfile @@ -11,7 +11,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy VK required for protocol version -COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ +COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_fri /usr/bin/ diff --git a/docker/prover-gpu-fri-gar/Dockerfile b/docker/prover-gpu-fri-gar/Dockerfile index bd70be7ee4b4..248f6aaf35fe 100644 --- a/docker/prover-gpu-fri-gar/Dockerfile +++ b/docker/prover-gpu-fri-gar/Dockerfile @@ -9,7 +9,7 @@ COPY *.bin / RUN apt-get update && apt-get install -y libpq5 ca-certificates openssl && rm -rf /var/lib/apt/lists/* # copy finalization hints required for assembly generation -COPY --from=prover prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ +COPY --from=prover prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ COPY --from=prover /usr/bin/zksync_prover_fri /usr/bin/ ENTRYPOINT ["zksync_prover_fri"] diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index 1093ed9e4ebf..0894c1c0c47d 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -31,7 +31,7 @@ FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy finalization hints required for assembly generation -COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ +COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_fri /usr/bin/ diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index 595168702b70..3f8affbd2a9b 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -11,7 +11,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* -COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ +COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_witness_generator /usr/bin/ diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index 9064595fcbed..d1bc1e29c5fa 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -12,7 +12,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy finalization hints required for witness vector generation -COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ +COPY 
prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_witness_vector_generator /usr/bin/ diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 9c9393ed5188..c5cb35cf1a07 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -84,7 +84,7 @@ RUN mkdir -p /etc/apt/keyrings && \ wget -c -O - https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && \ echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list && \ apt-get update && apt-get install nodejs -y && \ - npm install -g yarn && npm install -g cspell + npm install -g yarn # Install Rust and required cargo packages ENV RUSTUP_HOME=/usr/local/rustup \ @@ -105,7 +105,6 @@ RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y && \ RUN cargo install --version=0.7.3 sqlx-cli RUN cargo install cargo-nextest -RUN cargo install cargo-spellcheck # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. diff --git a/docs/guides/advanced/docker_and_ci.md b/docs/guides/advanced/docker_and_ci.md new file mode 100644 index 000000000000..ff1c7843b8b1 --- /dev/null +++ b/docs/guides/advanced/docker_and_ci.md @@ -0,0 +1,73 @@ +# Docker and CI + +How to efficiently debug CI issues locally. + +This document will be useful in case you struggle with reproducing some CI issues on your local machine. + +In most cases, this is because your local machine has some artifacts, configs, or files that you might have set up +in the past, and that are missing from the CI. + +## Basic docker commands + +- `docker ps` - prints the list of currently running containers +- `docker run` - starts a new docker container +- `docker exec` - connects to a running container and executes a command. +- `docker kill` - stops the container. +- `docker cp` - allows copying files between your system and a docker container. + +Usually docker containers have a specific binary that they run, but for debugging we often want to start a bash instead. + +The command below starts a new docker container and, instead of running its binary, runs `/bin/bash` in interactive +mode. + +``` +docker run -it matterlabs/zk-environment:latest2.0-lightweight-nightly /bin/bash +``` + +The command below connects to an **already running** container and gets you an interactive shell. + +``` +docker exec -i -it local-setup-zksync-1 /bin/bash +``` + +## Debugging CI + +Many of the tests require postgres & reth - you initialize them with: + +``` +docker compose up -d + +``` + +You should see something like this: + +``` +[+] Running 3/3 + ⠿ Network zksync-era_default Created 0.0s + ⠿ Container zksync-era-postgres-1 Started 0.3s + ⠿ Container zksync-era-reth-1 Started 0.3s +``` + +Start the docker container with the 'basic' image: + +``` +# We tell it to connect to the same 'subnetwork' as other containers (zksync-era_default). +# the IN_DOCKER variable changes various urls (like postgres) from localhost to postgres - so that it can connect to those +# containers above. +docker run --network zksync-era_default -e IN_DOCKER=1 -it matterlabs/zk-environment:latest2.0-lightweight-nightly /bin/bash +# and then inside, run: + +git clone https://github.com/matter-labs/zksync-era.git . +git checkout YOUR_BRANCH +zk +``` + +After this, you can run any commands you need.
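+ +For example, a quick sketch of checking that the containers above are alive and reachable (this assumes the default `postgres` user of the stock postgres image; adjust the container names to your setup): + +``` +# list the databases the tests will talk to +docker exec -it zksync-era-postgres-1 psql -U postgres -c '\l' +# follow the logs of a running container +docker logs -f zksync-era-reth-1 +```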
+ +When you see a command like `ci_run zk contract build` in the CI - this simply means that it executed +`zk contract build` inside that docker container. + +**IMPORTANT** - by default, docker runs in a mode where it does NOT persist the changes. So if you exit that +shell, all the changes will be removed (so when you restart, you'll end up in the same pristine condition). You can +'commit' your changes into a new docker image, using `docker commit XXX some_name`, where XXX is your container id from +`docker ps`. Afterwards you can 'start' this docker image with `docker run ... some_name`.
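+ +A minimal sketch of that commit flow (the container id and the image name below are placeholders): + +``` +docker ps # find your container id, e.g. 1a2b3c4d5e6f +docker commit 1a2b3c4d5e6f my-ci-debug +docker run -it my-ci-debug /bin/bash +```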
diff --git a/docs/guides/advanced/zk_intuition.md b/docs/guides/advanced/zk_intuition.md index e567ebf7ca82..6e0224a3237f 100644 --- a/docs/guides/advanced/zk_intuition.md +++ b/docs/guides/advanced/zk_intuition.md @@ -144,7 +144,8 @@ version 1.4.0. [bellman cuda repo]: https://github.com/matter-labs/era-bellman-cuda [example ecrecover circuit]: https://github.com/matter-labs/era-sync_vm/blob/v1.3.2/src/glue/ecrecover_circuit/mod.rs#L157 -[separate witness binary]: https://github.com/matter-labs/zksync-era/blob/main/prover/witness_generator/src/main.rs +[separate witness binary]: + https://github.com/matter-labs/zksync-era/blob/main/prover/crates/bin/witness_generator/src/main.rs [zkevm_test_harness witness]: https://github.com/matter-labs/era-zkevm_test_harness/blob/fb47657ae3b6ff6e4bb5199964d3d37212978200/src/external_calls.rs#L579 [heavy_ops_service repo]: https://github.com/matter-labs/era-heavy-ops-service diff --git a/docs/guides/development.md b/docs/guides/development.md index 5e53877993dd..c859017848b5 100644 --- a/docs/guides/development.md +++ b/docs/guides/development.md @@ -89,56 +89,6 @@ Currently the following criteria are checked: - Other code should always be formatted via `zk fmt`. - Dummy Prover should not be staged for commit (see below for the explanation). -## Spell Checking - -In our development workflow, we utilize a spell checking process to ensure the quality and accuracy of our documentation -and code comments. This is achieved using two primary tools: `cspell` and `cargo-spellcheck`. This section outlines how -to use these tools and configure them for your needs. - -### Using the Spellcheck Command - -The spell check command `zk spellcheck` is designed to check for spelling errors in our documentation and code. To run -the spell check, use the following command: - -``` -zk spellcheck -Options: ---pattern : Specifies the glob pattern for files to check. Default is docs/**/*. ---use-cargo: Utilize cargo spellcheck. ---use-cspell: Utilize cspell. -``` - -### General Rules - -**Code References in Comments**: When referring to code elements within development comments, they should be wrapped in -backticks. For example, reference a variable as `block_number`. - -**Code Blocks in Comments**: For larger blocks of pseudocode or commented-out code, use code blocks formatted as -follows: - -```` -// ``` -// let overhead_for_pubdata = { -// let numerator: U256 = overhead_for_block_gas * total_gas_limit -// + gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK); -// let denominator = -// gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK) + overhead_for_block_gas; -// ``` -```` - -**Language Settings**: We use the Hunspell language setting of `en_US`. - -**CSpell Usage**: For spell checking within the `docs/` directory, we use `cspell`. The configuration for this tool is -found in `cspell.json`. It's tailored to check our documentation for spelling errors. - -**Cargo-Spellcheck for Rust and Dev Comments**: For Rust code and development comments, `cargo-spellcheck` is used. Its -configuration is maintained in `era.cfg`. - -### Adding Words to the Dictionary - -To add a new word to the spell checker dictionary, navigate to `/spellcheck/era.dic` and include the word. Ensure that -the word is relevant and necessary to be included in the dictionary to maintain the integrity of our documentation. - ## Using Dummy Prover By default, the chosen prover is a "dummy" one, meaning that it doesn't actually compute proofs but rather uses mocks to diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md index 826c296fcd93..3902fdc15560 100644 --- a/docs/guides/external-node/00_quick_start.md +++ b/docs/guides/external-node/00_quick_start.md @@ -63,6 +63,12 @@ The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be ac > > Those are requirements for a freshly started node and the the state grows about 1TB per month for mainnet +> [!NOTE] +> +> To stop state growth, you can enable state pruning by uncommenting `EN_PRUNING_ENABLED: true` in the docker compose file; +> you can read more about pruning in +> [08_pruning.md](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/08_pruning.md) + - 32 GB of RAM and a relatively modern CPU - 30 GB of storage for testnet nodes - 300 GB of storage for mainnet nodes diff --git a/docs/guides/external-node/07_snapshots_recovery.md b/docs/guides/external-node/07_snapshots_recovery.md new file mode 100644 index 000000000000..99de05ff2c11 --- /dev/null +++ b/docs/guides/external-node/07_snapshots_recovery.md @@ -0,0 +1,97 @@ +# Snapshots Recovery + +Instead of initializing a node using a Postgres dump, it's possible to configure a node to recover from a protocol-level +snapshot. This process is much faster and requires much less storage. The Postgres database of a mainnet node recovered from +a snapshot is only about 300GB. Note that without [pruning](08_pruning.md) enabled, the node state will continuously +grow at a rate of about 15GB per day. + +## How it works + +A snapshot is effectively a point-in-time snapshot of the VM state at the end of a certain L1 batch. Snapshots are +created for the latest L1 batches periodically (roughly twice a day) and are stored in a public GCS bucket. + +Recovery from a snapshot consists of several parts. + +- **Postgres** recovery is the initial stage. The node API is not functioning during this stage. The stage is expected + to take about 1 hour on the mainnet. +- **Merkle tree** recovery starts once Postgres is fully recovered. Merkle tree recovery can take about 3 hours on the + mainnet. Ordinarily, Merkle tree recovery is a blocker for node synchronization; i.e., the node will not process + blocks newer than the snapshot block until the Merkle tree is recovered. +- Recovering RocksDB-based **VM state cache** is concurrent with Merkle tree recovery and also depends on Postgres + recovery. It takes about 1 hour on the mainnet. Unlike Merkle tree recovery, the VM state cache is not necessary for node + operation (the node will get the state from Postgres if it is absent), although it considerably speeds up VM + execution. + +After Postgres recovery is completed, the node becomes operational, providing the Web3 API etc. It still needs some time to +catch up executing blocks after the snapshot (i.e., roughly several hours' worth of blocks / transactions). This may take on the +order of 1–2 hours on the mainnet. In total, the recovery process and catch-up should thus take roughly 5–6 hours. + +## Current limitations + +Nodes recovered from a snapshot don't have any historical data from before the recovery. There is currently no way to +back-fill this historical data. E.g., if a node has recovered from a snapshot for L1 batch 500,000, then it will not have +data for L1 batches 499,999, 499,998, etc. The relevant Web3 methods, such as `eth_getBlockByNumber`, will return an +error mentioning the first locally retained block or L1 batch if queried about this missing data. The same error messages are +used for [pruning](08_pruning.md) because logically, recovering from a snapshot is equivalent to pruning node storage to +the snapshot L1 batch.
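+ +As an illustration, querying a block older than the snapshot returns such an error (a sketch assuming the node's HTTP JSON-RPC API is exposed on the default port `3060` mentioned in the quick start): + +``` +curl -s -X POST -H 'Content-Type: application/json' \ + -d '{"jsonrpc":"2.0","id":1,"method":"eth_getBlockByNumber","params":["0x1",false]}' \ + http://localhost:3060 +# expect an error mentioning the first locally retained block instead of block data +```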
In total, recovery process and catch-up thus should take roughly 5–6 hours. + +## Current limitations + +Nodes recovered from snapshot don't have any historical data from before the recovery. There is currently no way to +back-fill this historic data. E.g., if a node has recovered from a snapshot for L1 batch 500,000; then, it will not have +data for L1 batches 499,999, 499,998, etc. The relevant Web3 methods, such as `eth_getBlockByNumber`, will return an +error mentioning the first locally retained block or L1 batch if queried this missing data. The same error messages are +used for [pruning](08_pruning.md) because logically, recovering from a snapshot is equivalent to pruning node storage to +the snapshot L1 batch. + +## Configuration + +To enable snapshot recovery on mainnet, you need to set environment variables for a node before starting it for the +first time: + +```yaml +EN_SNAPSHOTS_RECOVERY_ENABLED: 'true' +EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL: 'zksync-era-mainnet-external-node-snapshots' +EN_SNAPSHOTS_OBJECT_STORE_MODE: 'GCSAnonymousReadOnly' +``` + +For the Sepolia testnet, use: + +```yaml +EN_SNAPSHOTS_RECOVERY_ENABLED: 'true' +EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL: 'zksync-era-boojnet-external-node-snapshots' +EN_SNAPSHOTS_OBJECT_STORE_MODE: 'GCSAnonymousReadOnly' +``` + +For a working examples of a fully configured Nodes recovering from snapshots, see +[Docker Compose examples](docker-compose-examples) and [_Quick Start_](00_quick_start.md). + +If a node is already recovered (does not matter whether from a snapshot or from a Postgres dump), setting these env +variables will have no effect; the node will never reset its state. + +## Monitoring recovery + +Snapshot recovery information is logged with the following targets: + +- **Recovery orchestration:** `zksync_external_node::init` +- **Postgres recovery:** `zksync_snapshots_applier` +- **Merkle tree recovery:** `zksync_metadata_calculator::recovery`, `zksync_merkle_tree::recovery` + +An example of snapshot recovery logs during the first node start: + +```text +2024-06-20T07:25:32.466926Z INFO zksync_external_node::init: Node has neither genesis L1 batch, nor snapshot recovery info +2024-06-20T07:25:32.466946Z INFO zksync_external_node::init: Chosen node initialization strategy: SnapshotRecovery +2024-06-20T07:25:32.466951Z WARN zksync_external_node::init: Proceeding with snapshot recovery. This is an experimental feature; use at your own risk +2024-06-20T07:25:32.475547Z INFO zksync_snapshots_applier: Found snapshot with data up to L1 batch #7, L2 block #27, version 0, storage logs are divided into 10 chunk(s) +2024-06-20T07:25:32.516142Z INFO zksync_snapshots_applier: Applied factory dependencies in 27.768291ms +2024-06-20T07:25:32.527363Z INFO zksync_snapshots_applier: Recovering storage log chunks with 10 max concurrency +2024-06-20T07:25:32.608539Z INFO zksync_snapshots_applier: Recovered 3007 storage logs in total; checking overall consistency... 
+ +## Monitoring recovery + +Snapshot recovery information is logged with the following targets: + +- **Recovery orchestration:** `zksync_external_node::init` +- **Postgres recovery:** `zksync_snapshots_applier` +- **Merkle tree recovery:** `zksync_metadata_calculator::recovery`, `zksync_merkle_tree::recovery` + +An example of snapshot recovery logs during the first node start: + +```text +2024-06-20T07:25:32.466926Z INFO zksync_external_node::init: Node has neither genesis L1 batch, nor snapshot recovery info +2024-06-20T07:25:32.466946Z INFO zksync_external_node::init: Chosen node initialization strategy: SnapshotRecovery +2024-06-20T07:25:32.466951Z WARN zksync_external_node::init: Proceeding with snapshot recovery. This is an experimental feature; use at your own risk +2024-06-20T07:25:32.475547Z INFO zksync_snapshots_applier: Found snapshot with data up to L1 batch #7, L2 block #27, version 0, storage logs are divided into 10 chunk(s) +2024-06-20T07:25:32.516142Z INFO zksync_snapshots_applier: Applied factory dependencies in 27.768291ms +2024-06-20T07:25:32.527363Z INFO zksync_snapshots_applier: Recovering storage log chunks with 10 max concurrency +2024-06-20T07:25:32.608539Z INFO zksync_snapshots_applier: Recovered 3007 storage logs in total; checking overall consistency... +2024-06-20T07:25:32.612967Z INFO zksync_snapshots_applier: Retrieved 2 tokens from main node +2024-06-20T07:25:32.616142Z INFO zksync_external_node::init: Recovered Postgres from snapshot in 148.523709ms +2024-06-20T07:25:32.645399Z INFO zksync_metadata_calculator::recovery: Recovering Merkle tree from Postgres snapshot in 1 chunks with max concurrency 10 +2024-06-20T07:25:32.650478Z INFO zksync_metadata_calculator::recovery: Filtered recovered key chunks; 1 / 1 chunks remaining +2024-06-20T07:25:32.681327Z INFO zksync_metadata_calculator::recovery: Recovered 1/1 Merkle tree chunks, there are 0 left to process +2024-06-20T07:25:32.784597Z INFO zksync_metadata_calculator::recovery: Recovered Merkle tree from snapshot in 144.040125ms +``` + +(Obviously, timestamps and numbers in the logs will differ.) + +Recovery logic also exports some metrics, the main of which are as follows: + +| Metric name | Type | Labels | Description | | ------------------------------------------------------- | --------- | ------------ | --------------------------------------------------------------------- | | `snapshots_applier_storage_logs_chunks_left_to_process` | Gauge | - | Number of storage log chunks left to process during Postgres recovery | | `db_pruner_pruning_chunk_duration_seconds` | Histogram | `prune_type` | Latency of a single pruning iteration | | `merkle_tree_pruning_deleted_stale_key_versions` | Gauge | `bound` | Versions (= L1 batches) pruned from the Merkle tree | diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md new file mode 100644 index 000000000000..83c127f3826d --- /dev/null +++ b/docs/guides/external-node/08_pruning.md @@ -0,0 +1,92 @@ +# Pruning + +It is possible to configure a ZKsync node to periodically prune all data from L1 batches older than a configurable +threshold. Data is pruned both from Postgres and from the tree (RocksDB). Pruning happens continuously (i.e., does not +require stopping the node) in the background during normal node operation. It is designed to not significantly impact +node performance. + +Types of pruned data in Postgres include: + +- Block and L1 batch headers +- Transactions +- EVM logs aka events +- Overwritten storage logs +- Transaction traces + +Pruned data is no longer available via the Web3 API of the node. The relevant Web3 methods, such as `eth_getBlockByNumber`, +will return an error mentioning the first retained block or L1 batch if queried about pruned data. + +## Interaction with snapshot recovery + +Pruning and [snapshot recovery](07_snapshots_recovery.md) are independent features. Pruning works both for archival +nodes restored from a Postgres dump and for nodes recovered from a snapshot. Conversely, a node recovered from a snapshot +may have pruning disabled; this would mean that it retains all data starting from the snapshot indefinitely (but not +earlier data, see [snapshot recovery limitations](07_snapshots_recovery.md#current-limitations)). + +A rough guide on whether to choose the recovery option and/or pruning is as follows: + +- If you need a node with a data retention period of up to a few days, set up a node from a snapshot with pruning enabled + and wait for it to have enough data. +- If you need a node with the entire rollup history, using a Postgres dump is the only option, and pruning should be + disabled. +- If you need a node with significant data retention (order of months), the best option right now is using a Postgres + dump. You may enable pruning for such a node, but beware that full pruning may take a significant amount of time (on the + order of weeks or months). In the future, we intend to offer pre-pruned Postgres dumps with a few months of data. + +## Configuration + +You can enable pruning by setting the environment variable + +```yaml +EN_PRUNING_ENABLED: 'true' +``` + +By default, the node will keep L1 batch data for 7 days, determined by the batch timestamp (always equal to the timestamp +of the first block in the batch). You can configure the retention period using: + +```yaml +EN_PRUNING_DATA_RETENTION_SEC: '259200' # 3 days +``` + +The retention period can be set to any value, but for mainnet, values under 21h will be ignored because a batch can only +be pruned after it has been executed on Ethereum. + +Pruning can be disabled or enabled and the data retention period can be freely changed during the node lifetime.
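+ +The retention value is a plain number of seconds, so computing it for a desired period is simple arithmetic, e.g.: + +``` +echo $((3 * 24 * 3600)) # 259200 - the 3-day example above +echo $((14 * 24 * 3600)) # 1209600 - two weeks +```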
+ +## Storage requirements for pruned nodes + +The storage requirements depend on how long you configure the node to retain data, but are roughly: + +- **40GB + ~5GB/day of retained data** of disk space needed on the machine that runs the node +- **300GB + ~15GB/day of retained data** of disk space for Postgres + +> [!NOTE] +> +> When pruning an existing archival node, Postgres will be unable to reclaim disk space automatically. To reclaim disk +> space, you need to manually run `VACUUM FULL`, which requires an `ACCESS EXCLUSIVE` lock. You can read more about it +> in [Postgres docs](https://www.postgresql.org/docs/current/sql-vacuum.html). + +## Monitoring pruning + +Pruning information is logged with the following targets: + +- **Postgres pruning:** `zksync_node_db_pruner` +- **Merkle tree pruning:** `zksync_metadata_calculator::pruning`, `zksync_merkle_tree::pruning`. + +To check whether Postgres pruning works as intended, you should look for logs like this: + +```text +2024-06-20T07:26:03.415382Z INFO zksync_node_db_pruner: Soft pruned db l1_batches up to 8 and L2 blocks up to 29, operation took 14.850042ms +2024-06-20T07:26:04.433574Z INFO zksync_node_db_pruner::metrics: Performed pruning of database, deleted 1 L1 batches, 2 L2 blocks, 68 storage logs, 383 events, 27 call traces, 12 L2-to-L1 logs +2024-06-20T07:26:04.436516Z INFO zksync_node_db_pruner: Hard pruned db l1_batches up to 8 and L2 blocks up to 29, operation took 18.653083ms +``` + +(Obviously, timestamps and numbers in the logs will differ.) + +Pruning logic also exports some metrics, the main of which are as follows: + +| Metric name | Type | Labels | Description | | ------------------------------------------------ | --------- | ------------ | --------------------------------------------------- | | `db_pruner_not_pruned_l1_batches_count` | Gauge | - | Number of retained L1 batches | | `db_pruner_pruning_chunk_duration_seconds` | Histogram | `prune_type` | Latency of a single pruning iteration | | `merkle_tree_pruning_deleted_stale_key_versions` | Gauge | `bound` | Versions (= L1 batches) pruned from the Merkle tree |
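+ +If your node exposes a Prometheus scrape endpoint, a quick way to eyeball these metrics is a local scrape (the `3322` metrics port below is an assumption - adjust to your configuration): + +``` +curl -s http://localhost:3322/metrics | grep -E 'db_pruner|merkle_tree_pruning' +```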
diff --git a/docs/guides/external-node/09_decentralization.md b/docs/guides/external-node/09_decentralization.md new file mode 100644 index 000000000000..fa780ba9ff55 --- /dev/null +++ b/docs/guides/external-node/09_decentralization.md @@ -0,0 +1,91 @@ +# Decentralization + +In the default setup the ZKsync node will fetch data from the ZKsync API endpoint maintained by Matter Labs. To reduce +the reliance on this centralized endpoint, we have developed a decentralized p2p networking stack (aka gossipnet) which +will eventually be used instead of the ZKsync API for synchronizing data. + +On the gossipnet, the data integrity will be protected by the BFT (byzantine fault tolerant) consensus algorithm +(currently data is signed just by the main node though). + +## Enabling gossipnet on your node + +> [!NOTE] +> +> Because the data transmitted over the gossipnet is signed by the main node (and eventually by the consensus quorum), +> the signatures need to be backfilled to the node's local storage the first time you switch from centralized (ZKsync +> API based) synchronization to the decentralized (gossipnet based) synchronization (this is a one-time thing). With the +> current implementation it may take a couple of hours and gets faster the more nodes you add to the +> `gossip_static_outbound` list (see below). We are working to remove this inconvenience. + +> [!NOTE] +> +> The minimal supported server version for this is +> [24.11.0](https://github.com/matter-labs/zksync-era/releases/tag/core-v24.11.0) + +### Generating secrets + +Each participant node of the gossipnet has to have an identity (a public/secret key pair). When running your node for +the first time, generate the secrets by running: + +``` +cargo run -p zksync_external_node -- generate-secrets > consensus_secrets.yaml +chmod 600 consensus_secrets.yaml +``` + +> [!NOTE] +> +> NEVER reveal the secret keys used by your node. Otherwise someone can impersonate your node on the gossipnet. If you +> suspect that your secret key has been leaked, you can generate fresh keys using the same tool. +> +> If you want someone else to connect to your node, give them your PUBLIC key instead. Both public and secret keys are +> present in the `consensus_secrets.yaml` (public keys are in comments). + +### Preparing configuration file + +Copy the template of the consensus configuration file (for +[mainnet](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml) +or +[testnet](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml) +). + +> [!NOTE] +> +> You need to fill in the `public_addr` field. This is the address that will (not implemented yet) be advertised over +> gossipnet to other nodes, so that they can establish connections to your node. If you don't want to expose your node +> to the public internet, you can use an IP in your local network. + +Currently the config contains the following fields (refer to config +[schema](https://github.com/matter-labs/zksync-era/blob/990676c5f84afd2ff8cd337f495c82e8d1f305a4/core/lib/protobuf_config/src/proto/core/consensus.proto#L66) +for more details): + +- `server_addr` - local TCP socket address that the node should listen on for incoming connections. Note that this is an + additional TCP port that will be opened by the node. +- `public_addr` - the public address of your node that will be advertised over the gossipnet. +- `max_payload_size` - limit (in bytes) on the size of the ZKsync ERA block received from the gossipnet. This protects + your node from getting DoSed by overly large network messages. Use the value from the template. +- `gossip_dynamic_inbound_limit` - maximum number of unauthenticated concurrent inbound connections that can be + established to your node. This is a DDoS protection measure. +- `gossip_static_outbound` - list of trusted peers that your node should always try to connect to. The template contains + the nodes maintained by Matterlabs, but you can add more if you know any. Note that the list contains both the network + address AND the public key of the node - this prevents spoofing attacks.
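+ +Conversely, if you want other operators to add your node to their `gossip_static_outbound` lists, they will need your public node key and address. As noted above, the public keys are stored as comments in the secrets file, so a quick sketch of extracting them: + +``` +grep public consensus_secrets.yaml +```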
+ +### Setting environment variables + +Uncomment (or add) the following lines in your `.env` config: + +``` +EN_CONSENSUS_CONFIG_PATH=... +EN_CONSENSUS_SECRETS_PATH=... +``` + +These variables should point to your consensus config and secrets files that we have just created. Tweak the paths to +the files if you have placed them differently. + +### Add `--enable-consensus` flag to your entry point + +For the consensus configuration to take effect, you have to add the `--enable-consensus` flag to the command line when +running the node: + +``` +cargo run -p zksync_external_node -- --enable-consensus +``` diff --git a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml index 8b48ff5ebca7..a3e823b260a1 100644 --- a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml @@ -74,6 +74,7 @@ services: EN_MAIN_NODE_URL: https://zksync2-mainnet.zksync.io EN_L1_CHAIN_ID: 1 EN_L2_CHAIN_ID: 324 + # EN_PRUNING_ENABLED: true EN_STATE_CACHE_PATH: "./db/ext-node/state_keeper" EN_MERKLE_TREE_PATH: "./db/ext-node/lightweight" diff --git a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml index f0402c290ebf..e7ebaafb3c40 100644 --- a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml @@ -74,6 +74,7 @@ services: EN_MAIN_NODE_URL: https://sepolia.era.zksync.dev EN_L1_CHAIN_ID: 11155111 EN_L2_CHAIN_ID: 300 + # EN_PRUNING_ENABLED: true EN_STATE_CACHE_PATH: "./db/ext-node/state_keeper" EN_MERKLE_TREE_PATH: "./db/ext-node/lightweight" diff --git a/docs/guides/external-node/prepared_configs/mainnet-config.env b/docs/guides/external-node/prepared_configs/mainnet-config.env index 35278205b96f..bce812084665 100644 --- a/docs/guides/external-node/prepared_configs/mainnet-config.env +++ b/docs/guides/external-node/prepared_configs/mainnet-config.env @@ -70,6 +70,9 @@ RUST_LOG=zksync_core=debug,zksync_dal=info,zksync_eth_client=info,zksync_merkle_ RUST_BACKTRACE=full RUST_LIB_BACKTRACE=1 +# Settings related to gossip network, see `09_decentralization.md` +#EN_CONSENSUS_CONFIG_PATH=./mainnet_consensus_config.yaml +#EN_CONSENSUS_SECRETS_PATH=./consensus_secrets.yaml # ------------------------------------------------------------------------ # -------------- THE FOLLOWING VARIABLES DEPEND ON THE ENV --------------- diff --git a/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml b/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml new file mode 100644 index 000000000000..6d61ef3963eb --- /dev/null +++ b/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml @@ -0,0 +1,10 @@ +server_addr: '0.0.0.0:3054' +public_addr: ':3054' +max_payload_size: 5000000 +gossip_dynamic_inbound_limit: 100 +gossip_static_outbound: + # preconfigured ENs
owned by Matterlabs that you can connect to + - key: 'node:public:ed25519:68d29127ab03408bf5c838553b19c32bdb3aaaae9bf293e5e078c3a0d265822a' + addr: 'external-node-consensus-mainnet.zksync.dev:3054' + - key: 'node:public:ed25519:b521e1bb173d04bc83d46b859d1296378e94a40427a6beb9e7fdd17cbd934c11' + addr: 'external-node-moby-consensus-mainnet.zksync.dev:3054' diff --git a/docs/guides/external-node/prepared_configs/testnet-sepolia-config.env b/docs/guides/external-node/prepared_configs/testnet-sepolia-config.env index 98e2ee6bd510..182012e2850c 100644 --- a/docs/guides/external-node/prepared_configs/testnet-sepolia-config.env +++ b/docs/guides/external-node/prepared_configs/testnet-sepolia-config.env @@ -70,6 +70,9 @@ RUST_LOG=zksync_core=debug,zksync_dal=info,zksync_eth_client=info,zksync_merkle_ RUST_BACKTRACE=full RUST_LIB_BACKTRACE=1 +# Settings related to gossip network, see `09_decentralization.md` +#EN_CONSENSUS_CONFIG_PATH=./testnet_consensus_config.yaml +#EN_CONSENSUS_SECRETS_PATH=./consensus_secrets.yaml # ------------------------------------------------------------------------ # -------------- THE FOLLOWING VARIABLES DEPEND ON THE ENV --------------- diff --git a/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml b/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml new file mode 100644 index 000000000000..25461b5dfc45 --- /dev/null +++ b/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml @@ -0,0 +1,10 @@ +server_addr: '0.0.0.0:3054' +public_addr: ':3054' +max_payload_size: 5000000 +gossip_dynamic_inbound_limit: 100 +gossip_static_outbound: + # preconfigured ENs owned by Matterlabs that you can connect to + - key: 'node:public:ed25519:4a94067664e7b8d0927ab1443491dab71a1d0c63f861099e1852f2b6d0831c3e' + addr: 'external-node-consensus-sepolia.zksync.dev:3054' + - key: 'node:public:ed25519:cfbbebc74127099680584f07a051a2573e2dd7463abdd000d31aaa44a7985045' + addr: 'external-node-moby-consensus-sepolia.zksync.dev:3054' diff --git a/docs/guides/setup-dev.md b/docs/guides/setup-dev.md index 4e005fc2795f..aafd96cda40a 100644 --- a/docs/guides/setup-dev.md +++ b/docs/guides/setup-dev.md @@ -17,12 +17,12 @@ sudo usermod -aG docker YOUR_USER ## You might need to re-connect (due to usermod change). # Node & yarn -nvm install 18 +nvm install 20 npm install -g yarn yarn set version 1.22.19 # SQL tools -cargo install sqlx-cli --version 0.7.3 +cargo install sqlx-cli --version 0.7.4 # Stop default postgres (as we'll use the docker one) sudo systemctl stop postgresql # Start docker. diff --git a/etc/env/base/base_token_adjuster.toml b/etc/env/base/base_token_adjuster.toml new file mode 100644 index 000000000000..b1b997eb67ac --- /dev/null +++ b/etc/env/base/base_token_adjuster.toml @@ -0,0 +1,8 @@ +# Configuration for the Base Token Adjuster crate + +[base_token_adjuster] + +# How often to poll external price feeds for the base token price. +price_polling_interval_ms = "30000" + +price_cache_update_interval_ms = "2000" diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 88a4c71bbb9b..0cb8213119b3 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -82,9 +82,9 @@ max_pubdata_per_batch = 100000 # Also, the fair L2 gas price is expected to only include the proving/computation price for the operator and not the costs that come from # processing the batch on L1. # - `V2`, the second model that was used in ZKsync Era. There the pubdata price might be independent from the L1 gas price. 
Also, -# The fair L2 gas price is expected to both the proving/computation price for the operator and the costs that come from +# The fair L2 gas price is expected to be both the proving/computation price for the operator and the costs that come from # processing the batch on L1. -fee_model_version = "V1" +fee_model_version = "V2" # Max number of computational gas that validation step is allowed to take. validation_computational_gas_limit = 300000 diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index 491bd19ea4bf..daa317a8bc90 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -32,7 +32,7 @@ PRIORITY_TX_MAX_GAS_LIMIT = 72000000 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000 GENESIS_ROLLUP_LEAF_INDEX = "54" GENESIS_PROTOCOL_VERSION = "24" -GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.24.1" +GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.24.2" L1_WETH_BRIDGE_IMPL_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L1_WETH_BRIDGE_PROXY_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L1_WETH_TOKEN_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" @@ -62,6 +62,7 @@ SNARK_WRAPPER_VK_HASH = "0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824 SHARED_BRIDGE_UPGRADE_STORAGE_SWITCH = 0 ERA_CHAIN_ID = 9 ERA_DIAMOND_PROXY_ADDR = "0x0000000000000000000000000000000000000000" +CHAIN_ADMIN_ADDR = "0x0000000000000000000000000000000000000000" [contracts.test] dummy_verifier = true easy_priority_mode = false diff --git a/etc/env/base/external_price_api.toml b/etc/env/base/external_price_api.toml new file mode 100644 index 000000000000..635195fd7608 --- /dev/null +++ b/etc/env/base/external_price_api.toml @@ -0,0 +1,8 @@ +# Configuration for the External Price API crate + +[external_price_api_client] + +# What source to use for the external price API. Currently the only options are "forced", "no-op", and "coingecko". +source = "no-op" + +client_timeout_ms = 10000 diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index ee4a69721cd3..1bb69374ab1a 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -57,6 +57,8 @@ zksync_health_check=debug,\ zksync_proof_fri_compressor=info,\ vise_exporter=debug,\ snapshots_creator=debug,\ +zksync_base_token_adjuster=debug,\ +zksync_external_price_api=debug,\ """ # `RUST_BACKTRACE` variable diff --git a/etc/env/base/vm_runner.toml b/etc/env/base/vm_runner.toml index c8f259efc3b7..dd8e9915280b 100644 --- a/etc/env/base/vm_runner.toml +++ b/etc/env/base/vm_runner.toml @@ -9,3 +9,11 @@ db_path = "./db/main/protective_reads" window_size = 3 # All batches before this one (inclusive) are always considered to be processed. first_processed_batch = 0 + +[vm_runner.bwip] +# Path to the directory that contains RocksDB with bwip writer cache. +db_path = "./db/main/basic_witness_input_producer" +# Amount of batches that can be processed in parallel. +window_size = 3 +# All batches before this one (inclusive) are always considered to be processed. +first_processed_batch = 0 diff --git a/etc/env/configs/dev_validium.toml b/etc/env/configs/dev_validium.toml index d1b415180bce..5ed4ccb38e41 100644 --- a/etc/env/configs/dev_validium.toml +++ b/etc/env/configs/dev_validium.toml @@ -10,6 +10,9 @@ max_pubdata_per_batch=100000 fee_model_version="V2" l1_batch_commit_data_generator_mode="Validium" +[eth_sender] +sender_pubdata_sending_mode="Custom" + # This override will be removed soon but it is needed for now.
[eth_sender.gas_adjuster] max_blob_base_fee=0 diff --git a/etc/env/configs/dev_validium_docker.toml b/etc/env/configs/dev_validium_docker.toml index 4392ca8d2711..7e985cb974ab 100644 --- a/etc/env/configs/dev_validium_docker.toml +++ b/etc/env/configs/dev_validium_docker.toml @@ -19,6 +19,9 @@ fee_model_version = "V2" l1_batch_commit_data_generator_mode = "Validium" miniblock_iteration_interval = 50 +[eth_sender] +sender_pubdata_sending_mode="Custom" + [eth_client] web3_url = "http://reth:8545" diff --git a/etc/env/consensus_config.yaml b/etc/env/consensus_config.yaml index 4a1f24c58e71..304ea31fac9c 100644 --- a/etc/env/consensus_config.yaml +++ b/etc/env/consensus_config.yaml @@ -1,5 +1,5 @@ -server_addr: '127.0.0.1:3054' -public_addr: '127.0.0.1:3054' +server_addr: "127.0.0.1:3054" +public_addr: "127.0.0.1:3054" max_payload_size: 2500000 gossip_dynamic_inbound_limit: 1 # LOCALHOST TEST CONFIGURATION ONLY, don't copy to other environments. @@ -7,6 +7,11 @@ genesis_spec: chain_id: 1337 protocol_version: 1 validators: - - key: 'validator:public:bls12_381:b14e3126668ae79e689a2d65c56522889a3812ef5433097c33bd7af601b073dcdddf46e188883aa381725c49e08f90c705df1f78bf918e1978912cebeadff0d0084b1a4fe2ddee243e826348045f528803207f5de303c6a95bc1a701a190dbcf' + - key: "validator:public:bls12_381:b14e3126668ae79e689a2d65c56522889a3812ef5433097c33bd7af601b073dcdddf46e188883aa381725c49e08f90c705df1f78bf918e1978912cebeadff0d0084b1a4fe2ddee243e826348045f528803207f5de303c6a95bc1a701a190dbcf" + weight: 1 + leader: "validator:public:bls12_381:b14e3126668ae79e689a2d65c56522889a3812ef5433097c33bd7af601b073dcdddf46e188883aa381725c49e08f90c705df1f78bf918e1978912cebeadff0d0084b1a4fe2ddee243e826348045f528803207f5de303c6a95bc1a701a190dbcf" + attesters: + - key: "attester:public:secp256k1:038b2762ad382b35090bb42992a3f6442d638425ea5528e800ffe5a9f7d4185589" + weight: 1 + - key: "attester:public:secp256k1:0330914e41b225fb6f8518d5278f0c014a9861018c8054f9bb425bb6538538f1c9" weight: 1 - leader: 'validator:public:bls12_381:b14e3126668ae79e689a2d65c56522889a3812ef5433097c33bd7af601b073dcdddf46e188883aa381725c49e08f90c705df1f78bf918e1978912cebeadff0d0084b1a4fe2ddee243e826348045f528803207f5de303c6a95bc1a701a190dbcf' diff --git a/etc/env/consensus_secrets.yaml b/etc/env/consensus_secrets.yaml index fdceef5e8e47..8235185d5fd2 100644 --- a/etc/env/consensus_secrets.yaml +++ b/etc/env/consensus_secrets.yaml @@ -1,4 +1,6 @@ # 'validator:public:bls12_381:b14e3126668ae79e689a2d65c56522889a3812ef5433097c33bd7af601b073dcdddf46e188883aa381725c49e08f90c705df1f78bf918e1978912cebeadff0d0084b1a4fe2ddee243e826348045f528803207f5de303c6a95bc1a701a190dbcf' -validator_key: 'validator:secret:bls12_381:3cf20d771450fcd0cbb3839b21cab41161af1554e35d8407a53b0a5d98ff04d4' +validator_key: "validator:secret:bls12_381:3cf20d771450fcd0cbb3839b21cab41161af1554e35d8407a53b0a5d98ff04d4" # 'node:public:ed25519:a9995979f228c91e4f387f7e141a9afe409196ee0c4fca0045c1c6b6e7892cb5' -node_key: 'node:secret:ed25519:9a40791b5a6b1627fc538b1ddecfa843bd7c4cd01fc0a4d0da186f9d3e740d7c' +node_key: "node:secret:ed25519:9a40791b5a6b1627fc538b1ddecfa843bd7c4cd01fc0a4d0da186f9d3e740d7c" +# 'attester:public:secp256k1:038b2762ad382b35090bb42992a3f6442d638425ea5528e800ffe5a9f7d4185589' +attester_key: "attester:secret:secp256k1:efc2431bd337d8ed1a16a21aa1f9916fade00cb9d1e849d493735df21e2d75ed" diff --git a/etc/env/ecosystems/mainnet.yaml b/etc/env/ecosystems/mainnet.yaml new file mode 100644 index 000000000000..1fa9930c29e6 --- /dev/null +++ b/etc/env/ecosystems/mainnet.yaml @@ -0,0 
+1,19 @@ +ecosystem_contracts: + bridgehub_proxy_addr: 0x303a465B659cBB0ab36eE643eA362c509EEb5213 + state_transition_proxy_addr: 0xc2eE6b6af7d616f6e27ce7F4A451Aedc2b0F5f5C + transparent_proxy_admin_addr: 0xC2a36181fB524a6bEfE639aFEd37A67e77d62cf1 + validator_timelock_addr: 0x5D8ba173Dc6C3c90C8f7C04C9288BeF5FDbAd06E + diamond_cut_data: 0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000600000000000000000000000003c8be122b2cf684230c54f891c917a8d7dc3bef80000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000a20000000000000000000000000f6f26b416ce7ae5e5fe224be332c7ae4e1f3450a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f00000000000000000000000000000000000000000000000000000000000000000000000000000000e60e94fccb18a81d501a38959e532c0a85a1be8900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b700000000
00000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000cdb6228b616eef8df47d69a372c4f725c43e718c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000ad193ade635576d8e9f7ada71af2137b16c640750000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001c000000000000000000000000070f3fbf8a427155185ec90bed8a3434203de9604f520cd5b37e74e19fdb369c8d676a04dce8a19457497ac6686d2bb95d94109c8f9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c60000000000000000000000000000000000000000000000000000000000000000010008e742608b21bf7eb23c1a9d0602047e3618b464c9b59c0fba3b3d7ab66e01000563374c277a2c1e34659a2a1e87371bb6d852ce142022d
497bfb50b9e3200000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f4240000000000000000000000000000000000000000000000000000000000001d4c00000000000000000000000000000000000000000000000000000000004c4b40000000000000000000000000000000000000000000000000000000000000182b8000000000000000000000000000000000000000000000000000000000ee6b2800000000000000000000000000000000000000000000000000000000000000000 +bridges: + erc20: + l1_address: 0x57891966931Eb4Bb6FB81430E6cE0A03AAbDe063 + shared: + l1_address: 0xD7f9f54194C633F36CCD5F3da84ad4a1c38cB2cB +l1: + default_upgrade_addr: 0x4d376798Ba8F69cEd59642c3AE8687c7457e855d + diamond_proxy_addr: 0x32400084c286cf3e17e7b677ea9583e60a000324 + governance_addr: 0x0b622A2061EaccAE1c664eBC3E868b8438e03F61 + multicall3_addr: 0xca11bde05977b3631167028862be2a173976ca11 + verifier_addr: 0x70F3FBf8a427155185Ec90BED8a3434203de9604 + validator_timelock_addr: 0x5D8ba173Dc6C3c90C8f7C04C9288BeF5FDbAd06E + base_token_addr: '0x0000000000000000000000000000000000000000' diff --git a/etc/env/ecosystems/stage.yaml b/etc/env/ecosystems/stage.yaml new file mode 100644 index 000000000000..f540cb272f7e --- /dev/null +++ b/etc/env/ecosystems/stage.yaml @@ -0,0 +1,21 @@ +create2_factory_addr: 0xce0042b868300000d44a59004da54a005ffdcf9f +create2_factory_salt: 0x8c8c6108a96a14b59963a18367250dc2042dfe62da8767d72ffddb03f269ffcc +ecosystem_contracts: + bridgehub_proxy_addr: 0x236D1c3Ff32Bd0Ca26b72Af287E895627c0478cE + state_transition_proxy_addr: 0x8b448ac7cd0f18F3d8464E2645575772a26A3b6b + transparent_proxy_admin_addr: 0xCb7F8e556Ef02771eA32F54e767D6F9742ED31c2 + validator_timelock_addr: 0x8D65310fe158734eEA3197FF9a6211F9Bba3D0A8 + diamond_cut_data: 
0x0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000017384fd6cc64468b69df514a940cac89b602d01c0000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000a2000000000000000000000000096b40174102c93155cdb46a5e4691eeb6c4e1b7b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f00000000000000000000000000000000000000000000000000000000000000000000000000000000183a8459e2a4440f364bec5040d8327bbb619be300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf90000000000000000000000000
0000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000d60696fa25ee7a4b6d476ff705684ced7aab7f97000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000200caf816bcdd94123d3c18488741d4e4fa40ba60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001c0000000000000000000000000ac3a2dc46cea843f0a9d6554f8804aed18ff0795f520cd5b37e74e19fdb369c8d676a04dce8a19457497ac6686d2bb95d94109c8f9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c60000000000000000000000000000000000000000000000000000000000000000010008e742608b21bf7eb23c1a9d0602047e3618b464c9b59c0fba3b3d7ab66e01000563374c277a2c1e34659a2a1e87371bb6d852ce142022d497bfb50b9e3200000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f4240000000000000000000000000000000000000000000000000000000000001d4c00000000000000000000000000000000000000000000000000000000004c4b4000000
0000000000000000000000000000000000000000000000000000000182b8000000000000000000000000000000000000000000000000000000000ee6b280000000000000000000000000273bdccdd979510adf4fb801d92f64b243c01fe2 +bridges: + erc20: + l1_address: 0x7303B5Ce64f1ADB0558572611a0b90620b6dd5F4 + shared: + l1_address: 0x6F03861D12E6401623854E494beACd66BC46e6F0 +l1: + default_upgrade_addr: 0xc029cE1EB5C61C4a3B2a6EE920bb3B7b026bc00b + diamond_proxy_addr: 0x9A6DE0f62Aa270A8bCB1e2610078650D539B1Ef9 + governance_addr: 0xEE73438083629026FAfA1f5F5bBE2bBD6Bad6331 + multicall3_addr: 0xca11bde05977b3631167028862be2a173976ca11 + verifier_addr: 0x82856fED36d36e1d4db24398bC2056C440cB45FC + validator_timelock_addr: 0x8D65310fe158734eEA3197FF9a6211F9Bba3D0A8 + base_token_addr: '0x0000000000000000000000000000000000000000' diff --git a/etc/env/ecosystems/testnet.yaml b/etc/env/ecosystems/testnet.yaml new file mode 100644 index 000000000000..72192d6b5528 --- /dev/null +++ b/etc/env/ecosystems/testnet.yaml @@ -0,0 +1,23 @@ +create2_factory_addr: 0xce0042b868300000d44a59004da54a005ffdcf9f +create2_factory_salt: 0x8c8c6108a96a14b59963a18367250dc2042dfe62da8767d72ffddb03f269ffcc +ecosystem_contracts: + bridgehub_proxy_addr: 0x35A54c8C757806eB6820629bc82d90E056394C92 + state_transition_proxy_addr: 0x4e39E90746A9ee410A8Ce173C7B96D3AfEd444a5 + transparent_proxy_admin_addr: 0x0358BACa94dcD7931B7BA7aAf8a5Ac6090E143a5 + validator_timelock_addr: 0xD3876643180A79d0A56d0900C060528395f34453 + diamond_cut_data: 0x0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000027a7f18106281fe53d371958e8bc3f833694d24a0000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000a2000000000000000000000000096b40174102c93155cdb46a5e4691eeb6c4e1b7b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f00000000000000000000000000000000000000000000000000000000000000000000000000000000183a8459e2a4440f364bec5040d8327bbb619be300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e34000000000000000000000000
00000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000550cf73f4b50aa0df0257f2d07630d48fa00f73a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000200caf816bcdd94123d3c18488741d4e4fa40ba60000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001c0000000000000000000000000ac3a2dc46cea843f0a9d6554f8804aed18ff0795f520cd5b37e74e19fdb369c8d676a04dce8a19457497ac6686d2bb95d94109c8f9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c60000000000000000000000000000000000000000000000000000000000000000010008e742608b21bf7eb23c1a9d0602047e3618b464c9b59c0fba3b3d7ab66e01000563374c277a2c1e34659a2a1e87371bb6d852ce142022d497bfb50b9e3200000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f4240000000000000000000000000000000000000000000000000000000000001d4c00000000000000000000000000000000000000000000000000000000004c4b40000000000000000000000000000000000000000000000000000000000000182b8000000000000000000000000000000000000000000000000000000000ee6b280000000000000000000000000273bdccdd979510adf4fb801d92f64b243c01fe2 +bridges: + erc20: + l1_address: 0x2Ae09702F77a4940621572fBcDAe2382D44a2cbA + shared: + l1_address: 0x3E8b2fe58675126ed30d0d12dea2A9bda72D18Ae + weth: + l1_address: 0x7b79995e5f793A07Bc00c21412e50Ecae098E7f9 +l1: + default_upgrade_addr: 0x27A7F18106281fE53d371958E8bC3f833694D24a + diamond_proxy_addr: 0x9A6DE0f62Aa270A8bCB1e2610078650D539B1Ef9 + governance_addr: 0x62e77441531b4B045a6B6f4891be4AdBA7eD4d88 + multicall3_addr: 0xca11bde05977b3631167028862be2a173976ca11 + verifier_addr: 0xAC3a2Dc46ceA843F0A9d6554f8804AeD18ff0795 + validator_timelock_addr: 0xD3876643180A79d0A56d0900C060528395f34453 + base_token_addr: '0x0000000000000000000000000000000000000000' diff --git a/etc/env/en_consensus_secrets.yaml b/etc/env/en_consensus_secrets.yaml index 3f3407a7035b..00a433b07116 100644 --- a/etc/env/en_consensus_secrets.yaml +++ b/etc/env/en_consensus_secrets.yaml @@ -1,2 +1,4 @@ # 'node:public:ed25519:2621c2ae111901d4a9b46e96e64f71282b9209fc6b5e4df3d4208d3de28a482d' -node_key: 'node:secret:ed25519:19bc1ddd9fd2921d1b919e7dcfa465babdcf61a60a21e5df9b3f105bd9cfcb2c' +node_key: "node:secret:ed25519:19bc1ddd9fd2921d1b919e7dcfa465babdcf61a60a21e5df9b3f105bd9cfcb2c" +# 'attester:public:secp256k1:0330914e41b225fb6f8518d5278f0c014a9861018c8054f9bb425bb6538538f1c9' +attester_key: "attester:secret:secp256k1:899b0caa073f5db0a07e1fe953c94b05256f2c92fd03f0c33ef870622bc778ab" diff --git a/etc/env/file_based/contracts.yaml b/etc/env/file_based/contracts.yaml index e6f175a2727a..791fb27f7a71 100644 --- a/etc/env/file_based/contracts.yaml +++ b/etc/env/file_based/contracts.yaml @@ -6,6 +6,7 @@ l1: multicall3_addr: "0x0000000000000000000000000000000000000000" validator_timelock_addr: "0x0000000000000000000000000000000000000000" base_token_addr: "0x0000000000000000000000000000000000000000" + chain_admin_addr: "0x0000000000000000000000000000000000000000" l2: testnet_paymaster_addr: 
"0x0000000000000000000000000000000000000000" bridges: diff --git a/etc/env/file_based/external_node.yaml b/etc/env/file_based/external_node.yaml new file mode 100644 index 000000000000..675baf739686 --- /dev/null +++ b/etc/env/file_based/external_node.yaml @@ -0,0 +1,6 @@ +l1_chain_id: 9 +l2_chain_id: 270 +l1_batch_commit_data_generator_mode: Rollup + +main_node_url: http://localhost:3050 +main_node_rate_limit_rps: 1000 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 03cba74c97c8..7914ece95c70 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -63,6 +63,7 @@ api: estimate_gas_scale_factor: 1.2 estimate_gas_acceptable_overestimation: 1000 max_tx_size: 1000000 + api_namespaces: [ eth,net,web3,zks,pubsub,debug ] max_response_body_size_overrides: - method: eth_getTransactionReceipt # no size specified, meaning no size limit - method: zks_getProof @@ -91,6 +92,7 @@ state_keeper: validation_computational_gas_limit: 300000 save_call_traces: true max_circuits_per_batch: 24100 + protective_reads_persistence_enabled: true mempool: delay_interval: 100 sync_interval_ms: 10 @@ -128,7 +130,7 @@ eth: aggregated_block_execute_deadline: 10 timestamp_criteria_max_allowed_lag: 30 max_eth_tx_data_size: 120000 - aggregated_proof_sizes: [ 1,4 ] + aggregated_proof_sizes: [ 1 ] max_aggregated_tx_gas: 4000000 max_acceptable_priority_fee_in_gwei: 100000000000 pubdata_sending_mode: BLOBS @@ -179,10 +181,11 @@ witness_generator: generation_timeout_in_secs: 900 max_attempts: 10 shall_save_to_public_bucket: true + prometheus_listener_port: 3116 witness_vector_generator: prover_instance_wait_timeout_in_secs: 200 prover_instance_poll_time_in_milli_secs: 250 - prometheus_listener_port: 3314 + prometheus_listener_port: 3420 prometheus_pushgateway_url: http://127.0.0.1:9091 prometheus_push_interval_ms: 100 specialized_group_id: 100 @@ -194,7 +197,7 @@ data_handler: prover_gateway: api_url: http://127.0.0.1:3320 api_poll_duration_secs: 1000 - prometheus_listener_port: 3314 + prometheus_listener_port: 3310 prometheus_pushgateway_url: http://127.0.0.1:9091 prometheus_push_interval_ms: 100 proof_compressor: @@ -293,6 +296,12 @@ prover_group: aggregation_round: 1 - circuit_id: 18 aggregation_round: 1 +base_token_adjuster: + price_polling_interval_ms: 30000 + price_cache_update_interval_ms: 2000 +external_price_api_client: + source: "no-op" + client_timeout_ms: 10000 house_keeper: l1_batch_metrics_reporting_interval_ms: 10000 @@ -311,13 +320,13 @@ house_keeper: fri_gpu_prover_archiver_archive_after_secs: 172800 prometheus: - listener_port: 3312 + listener_port: 3314 pushgateway_url: http://127.0.0.1:9091 push_interval_ms: 100 observability: log_format: plain - log_directives: 
"zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=info,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=debug,snapshots_creator=debug" + log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=info,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug" sentry: url: unset panic_interval: 1800 @@ -332,6 +341,28 @@ protective_reads_writer: window_size: 3 first_processed_batch: 0 +basic_witness_input_producer: + db_path: "./db/main/basic_witness_input_producer" + window_size: 3 + first_processed_batch: 0 + +snapshot_recovery: + enabled: false + postgres: + max_concurrency: 10 + tree: + chunk_size: 200000 + experimental: + tree_recovery_parallel_persistence_buffer: 1 + drop_storage_key_preimages: true +pruning: + enabled: true + chunk_size: 10 + removal_delay_sec: 60 + data_retention_sec: 3600 + +commitment_generator: + max_parallelism: 10 core_object_store: file_backed: diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index e3513a8b6421..4f084648c7ca 100644 --- 
a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -1,7 +1,7 @@ genesis_root: 0xabdb766b18a479a5c783a4b80e12686bc8ea3cc2d8a3050491b701d72370ebb5 genesis_rollup_leaf_index: 54 genesis_batch_commitment: 0x2d00e5f8d77afcebf58a6b82ae56ba967566fe7dfbcb6760319fb0d215d18ffd -genesis_protocol_semantic_version: '0.24.0' +genesis_protocol_semantic_version: '0.24.1' # deprecated genesis_protocol_version: 24 default_aa_hash: 0x01000563374c277a2c1e34659a2a1e87371bb6d852ce142022d497bfb50b9e32 @@ -10,9 +10,9 @@ l1_chain_id: 9 l2_chain_id: 270 fee_account: '0x0000000000000000000000000000000000000001' prover: - recursion_scheduler_level_vk_hash: 0x712bb009b5d5dc81c79f827ca0abff87b43506a8efed6028a818911d4b1b521f + recursion_scheduler_level_vk_hash: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 recursion_node_level_vk_hash: 0xf520cd5b37e74e19fdb369c8d676a04dce8a19457497ac6686d2bb95d94109c8 - recursion_leaf_level_vk_hash: 0xffb19d007c67b9000b40b372e7a7a55a47d11c92588515598d6cad4052c75ebb + recursion_leaf_level_vk_hash: 0xf9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c6 recursion_circuits_set_vks_hash: '0x0000000000000000000000000000000000000000000000000000000000000000' dummy_verifier: true l1_batch_commit_data_generator_mode: Rollup diff --git a/etc/nix/README.md b/etc/nix/README.md new file mode 100644 index 000000000000..a7cce422e6e6 --- /dev/null +++ b/etc/nix/README.md @@ -0,0 +1,86 @@ +# Declarative and Reproducible builds with Nix + +This directory contains the nix build recipes for various components of this project. Most importantly, it is used to +build `zksync_tee_prover` reproducibly and to create a container with everything needed to run it on an +SGX machine. + +## Prerequisites + +Install [nix](https://zero-to-nix.com/start/install). + +In `~/.config/nix/nix.conf` + +```ini +experimental-features = nix-command flakes +sandbox = true +``` + +or, on NixOS, add the following lines to `/etc/nixos/configuration.nix`: + +```nix +{ + nix = { + extraOptions = '' + experimental-features = nix-command flakes + sandbox = true + ''; + }; +} +``` + +## Build + +Build various components of this project with `nix`. + +### Build as the CI would + +```shell +nix run github:nixos/nixpkgs/nixos-23.11#nixci +``` + +### Build individual parts + +```shell +nix build .#zksync +``` + +or + +```shell +nix build .#zksync.contract_verifier +nix build .#zksync.external_node +nix build .#zksync.server +nix build .#zksync.snapshots_creator +nix build .#zksync.block_reverter +``` + +or + +```shell +nix build .#tee_prover +nix build .#container-tee-prover-dcap +nix build .#container-tee-prover-azure +``` + +## Develop + +`nix` can provide the build environment for this project. + +```shell +nix develop +``` + +Optionally, create `.envrc` so that `direnv` automatically loads the environment when entering the main directory: + +```shell +$ cat <<EOF > .envrc +use flake .# +EOF +$ direnv allow +``` + +### Format for commit + +```shell +nix run .#fmt +``` diff --git a/etc/nix/container-tee_prover.nix b/etc/nix/container-tee_prover.nix new file mode 100644 index 000000000000..ab2b12c48db0 --- /dev/null +++ b/etc/nix/container-tee_prover.nix @@ -0,0 +1,48 @@ +{ pkgs +, nixsgxLib +, teepot +, tee_prover +, container-name +, isAzure ? true +, tag ?
null +}: +let + name = container-name; + entrypoint = "${teepot.teepot.tee_key_preexec}/bin/tee-key-preexec"; +in +nixsgxLib.mkSGXContainer { + inherit name; + inherit tag; + + packages = [ teepot.teepot.tee_key_preexec tee_prover ]; + inherit entrypoint; + inherit isAzure; + + manifest = { + loader = { + argv = [ + entrypoint + "${tee_prover}/bin/zksync_tee_prover" + ]; + + log_level = "error"; + + env = { + TEE_API_URL.passthrough = true; + API_PROMETHEUS_LISTENER_PORT.passthrough = true; + API_PROMETHEUS_PUSHGATEWAY_URL.passthrough = true; + API_PROMETHEUS_PUSH_INTERVAL_MS.passthrough = true; + + ### DEBUG ### + RUST_BACKTRACE = "1"; + RUST_LOG = "warning,zksync_tee_prover=debug"; + }; + }; + + sgx = { + edmm_enable = false; + enclave_size = "32G"; + max_threads = 128; + }; + }; +} diff --git a/etc/nix/devshell.nix b/etc/nix/devshell.nix new file mode 100644 index 000000000000..046cd210d162 --- /dev/null +++ b/etc/nix/devshell.nix @@ -0,0 +1,37 @@ +{ pkgs +, zksync +, commonArgs +}: +pkgs.mkShell { + inputsFrom = [ zksync ]; + + packages = with pkgs; [ + docker-compose + nodejs + yarn + axel + postgresql + python3 + solc + sqlx-cli + ]; + + inherit (commonArgs) env hardeningEnable; + + shellHook = '' + export ZKSYNC_HOME=$PWD + export PATH=$ZKSYNC_HOME/bin:$PATH + + if [ "x$NIX_LD" = "x" ]; then + export NIX_LD=$(<${pkgs.clangStdenv.cc}/nix-support/dynamic-linker) + fi + if [ "x$NIX_LD_LIBRARY_PATH" = "x" ]; then + export NIX_LD_LIBRARY_PATH="$ZK_NIX_LD_LIBRARY_PATH" + else + export NIX_LD_LIBRARY_PATH="$NIX_LD_LIBRARY_PATH:$ZK_NIX_LD_LIBRARY_PATH" + fi + ''; + + ZK_NIX_LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [ ]; +} + diff --git a/etc/nix/tee_prover.nix b/etc/nix/tee_prover.nix new file mode 100644 index 000000000000..50273b91fb5a --- /dev/null +++ b/etc/nix/tee_prover.nix @@ -0,0 +1,10 @@ +{ cargoArtifacts +, craneLib +, commonArgs +}: +craneLib.buildPackage (commonArgs // { + pname = "zksync_tee_prover"; + version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; + cargoExtraArgs = "-p zksync_tee_prover --bin zksync_tee_prover"; + inherit cargoArtifacts; +}) diff --git a/etc/nix/zksync.nix b/etc/nix/zksync.nix new file mode 100644 index 000000000000..c5fffc48b09d --- /dev/null +++ b/etc/nix/zksync.nix @@ -0,0 +1,40 @@ +{ cargoArtifacts +, craneLib +, commonArgs +}: +craneLib.buildPackage (commonArgs // { + pname = "zksync"; + version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; + cargoExtraArgs = "--all"; + inherit cargoArtifacts; + + outputs = [ + "out" + "contract_verifier" + "external_node" + "server" + "snapshots_creator" + "block_reverter" + ]; + + postInstall = '' + mkdir -p $out/nix-support + for i in $outputs; do + [[ $i == "out" ]] && continue + mkdir -p "''${!i}/bin" + echo "''${!i}" >> $out/nix-support/propagated-user-env-packages + if [[ -e "$out/bin/zksync_$i" ]]; then + mv "$out/bin/zksync_$i" "''${!i}/bin" + else + mv "$out/bin/$i" "''${!i}/bin" + fi + done + + mkdir -p $external_node/nix-support + echo "block_reverter" >> $external_node/nix-support/propagated-user-env-packages + + mv $out/bin/merkle_tree_consistency_checker $server/bin + mkdir -p $server/nix-support + echo "block_reverter" >> $server/nix-support/propagated-user-env-packages + ''; +}) diff --git a/etc/utils/package.json b/etc/utils/package.json index 6ce76330c8ea..a239c29e3d51 100644 --- a/etc/utils/package.json +++ b/etc/utils/package.json @@ -5,9 +5,11 @@ "main": "build/index.js", 
"types": "build/index.d.ts", "scripts": { - "build": "tsc" + "build": "tsc", + "watch": "tsc -w" }, "dependencies": { - "chalk": "^4.0.0" + "chalk": "^4.0.0", + "yaml": "^2.4.2" } } diff --git a/etc/utils/src/file-configs.ts b/etc/utils/src/file-configs.ts new file mode 100644 index 000000000000..1675745bca5d --- /dev/null +++ b/etc/utils/src/file-configs.ts @@ -0,0 +1,132 @@ +import * as path from 'path'; +import * as fs from 'fs'; +import * as yaml from 'yaml'; + +export function shouldLoadConfigFromFile() { + const chain = process.env.CHAIN_NAME; + if (chain) { + return { + loadFromFile: true, + chain + } as const; + } else { + return { + loadFromFile: false + } as const; + } +} + +export const configNames = [ + 'contracts.yaml', + 'general.yaml', + 'genesis.yaml', + 'secrets.yaml', + 'wallets.yaml', + 'external_node.yaml' +] as const; + +export type ConfigName = (typeof configNames)[number]; + +export function loadEcosystem(pathToHome: string) { + const configPath = path.join(pathToHome, '/ZkStack.yaml'); + if (!fs.existsSync(configPath)) { + return []; + } + return yaml.parse( + fs.readFileSync(configPath, { + encoding: 'utf-8' + }) + ); +} + +export function loadConfig({ + pathToHome, + chain, + configsFolder, + configsFolderSuffix, + config +}: { + pathToHome: string; + chain: string; + configsFolder?: string; + configsFolderSuffix?: string; + config: ConfigName; +}) { + const configPath = path.join( + getConfigsFolderPath({ pathToHome, chain, configsFolder, configsFolderSuffix }), + config + ); + if (!fs.existsSync(configPath)) { + return []; + } + return yaml.parse( + fs.readFileSync(configPath, { + encoding: 'utf-8' + }), + { + customTags: (tags) => + tags.filter((tag) => { + if (typeof tag === 'string') { + return true; + } + if (tag.format !== 'HEX') { + return true; + } + return false; + }) + } + ); +} + +export function getConfigPath({ + pathToHome, + chain, + configsFolder, + config +}: { + pathToHome: string; + chain: string; + configsFolder?: string; + config: ConfigName; +}) { + return path.join(getConfigsFolderPath({ pathToHome, chain, configsFolder }), config); +} + +export function getAllConfigsPath({ + pathToHome, + chain, + configsFolder +}: { + pathToHome: string; + chain: string; + configsFolder?: string; +}) { + const configPaths = {} as Record; + configNames.forEach((config) => { + configPaths[config] = getConfigPath({ pathToHome, chain, configsFolder, config }); + }); + return configPaths; +} + +export function getConfigsFolderPath({ + pathToHome, + chain, + configsFolder, + configsFolderSuffix +}: { + pathToHome: string; + chain: string; + configsFolder?: string; + configsFolderSuffix?: string; +}) { + return path.join(pathToHome, 'chains', chain, configsFolder ?? 'configs', configsFolderSuffix ?? 
''); +} + +export function replaceAggregatedBlockExecuteDeadline(pathToHome: string, fileConfig: any, value: number) { + const generalConfigPath = getConfigPath({ pathToHome, chain: fileConfig.chain, config: 'general.yaml' }); + const generalConfig = fs.readFileSync(generalConfigPath, 'utf8'); + const regex = /aggregated_block_execute_deadline:\s*\d+/g; + const newGeneralConfig = generalConfig.replace(regex, `aggregated_block_execute_deadline: ${value}`); + + fs.writeFileSync(generalConfigPath, newGeneralConfig, 'utf8'); +} diff --git a/etc/utils/src/index.ts b/etc/utils/src/index.ts index 38d980cb1509..28cd864a1bf6 100644 --- a/etc/utils/src/index.ts +++ b/etc/utils/src/index.ts @@ -1,4 +1,4 @@ -import { exec as _exec, spawn as _spawn } from 'child_process'; +import { exec as _exec, spawn as _spawn, type ProcessEnvOptions } from 'child_process'; import { promisify } from 'util'; import fs from 'fs'; import readline from 'readline'; @@ -53,9 +53,17 @@ export function spawn(command: string) { // executes a command in background and returns a child process handle // by default pipes data to parent's stdio but this can be overridden -export function background(command: string, stdio: any = 'inherit') { +export function background({ + command, + stdio = 'inherit', + cwd +}: { + command: string; + stdio: any; + cwd?: ProcessEnvOptions['cwd']; +}) { command = command.replace(/\n/g, ' '); - return _spawn(command, { stdio: stdio, shell: true, detached: true }); + return _spawn(command, { stdio: stdio, shell: true, detached: true, cwd }); } export async function confirmAction() { diff --git a/etc/utils/src/server.ts b/etc/utils/src/server.ts new file mode 100644 index 000000000000..94184f0db9b6 --- /dev/null +++ b/etc/utils/src/server.ts @@ -0,0 +1,23 @@ +import { background } from '.'; + +// TODO: change to use `zk_inception` once migration is complete +const BASE_COMMAND = 'zk_inception server'; +const BASE_COMMAND_WITH_ZK = 'zk server'; + +export function runServerInBackground({ + components, + stdio, + cwd, + useZkInception +}: { + components?: string[]; + stdio: any; + cwd?: Parameters<typeof background>[0]['cwd']; + useZkInception?: boolean; +}) { + let command = useZkInception ?
BASE_COMMAND : BASE_COMMAND_WITH_ZK; + if (components && components.length > 0) { + command += ` --components=${components.join(',')}`; + } + background({ command, stdio, cwd }); +} diff --git a/etc/utils/tsconfig.json b/etc/utils/tsconfig.json index f96df8d60edb..66a070f64259 100644 --- a/etc/utils/tsconfig.json +++ b/etc/utils/tsconfig.json @@ -8,8 +8,5 @@ "noEmitOnError": true, "skipLibCheck": true, "declaration": true - }, - "files": [ - "src/index.ts" - ] + } } diff --git a/flake.lock b/flake.lock index 8b345701bbc6..fe16e2254b51 100644 --- a/flake.lock +++ b/flake.lock @@ -1,5 +1,95 @@ { "nodes": { + "crane": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1720226507, + "narHash": "sha256-yHVvNsgrpyNTXZBEokL8uyB2J6gB1wEx0KOJzoeZi1A=", + "owner": "ipetkov", + "repo": "crane", + "rev": "0aed560c5c0a61c9385bddff471a13036203e11c", + "type": "github" + }, + "original": { + "owner": "ipetkov", + "repo": "crane", + "type": "github" + } + }, + "crane_2": { + "inputs": { + "nixpkgs": [ + "teepot-flake", + "nixsgx-flake", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1716156051, + "narHash": "sha256-TjUX7WWRcrhuUxDHsR8pDR2N7jitqZehgCVSy3kBeS8=", + "owner": "ipetkov", + "repo": "crane", + "rev": "7443df1c478947bf96a2e699209f53b2db26209d", + "type": "github" + }, + "original": { + "owner": "ipetkov", + "repo": "crane", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1650374568, + "narHash": "sha256-Z+s0J8/r907g149rllvwhb4pKi8Wam5ij0st8PwAh+E=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "b4a34015c698c7793d592d66adbab377907a2be8", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1650374568, + "narHash": "sha256-Z+s0J8/r907g149rllvwhb4pKi8Wam5ij0st8PwAh+E=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "b4a34015c698c7793d592d66adbab377907a2be8", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_3": { + "flake": false, + "locked": { + "lastModified": 1650374568, + "narHash": "sha256-Z+s0J8/r907g149rllvwhb4pKi8Wam5ij0st8PwAh+E=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "b4a34015c698c7793d592d66adbab377907a2be8", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "flake-utils": { "inputs": { "systems": "systems" @@ -18,10 +108,103 @@ "type": "github" } }, + "flake-utils-plus": { + "inputs": { + "flake-utils": "flake-utils_2" + }, + "locked": { + "lastModified": 1715533576, + "narHash": "sha256-fT4ppWeCJ0uR300EH3i7kmgRZnAVxrH+XtK09jQWihk=", + "owner": "gytis-ivaskevicius", + "repo": "flake-utils-plus", + "rev": "3542fe9126dc492e53ddd252bb0260fe035f2c0f", + "type": "github" + }, + "original": { + "owner": "gytis-ivaskevicius", + "repo": "flake-utils-plus", + "rev": "3542fe9126dc492e53ddd252bb0260fe035f2c0f", + "type": "github" + } + }, + "flake-utils-plus_2": { + "inputs": { + "flake-utils": "flake-utils_3" + }, + "locked": { + "lastModified": 1715533576, + "narHash": "sha256-fT4ppWeCJ0uR300EH3i7kmgRZnAVxrH+XtK09jQWihk=", + "owner": "gytis-ivaskevicius", + "repo": "flake-utils-plus", + "rev": "3542fe9126dc492e53ddd252bb0260fe035f2c0f", + "type": "github" + }, + "original": { + "owner": "gytis-ivaskevicius", + "repo": "flake-utils-plus", + "rev": 
"3542fe9126dc492e53ddd252bb0260fe035f2c0f", + "type": "github" + } + }, + "flake-utils-plus_3": { + "inputs": { + "flake-utils": "flake-utils_6" + }, + "locked": { + "lastModified": 1715533576, + "narHash": "sha256-fT4ppWeCJ0uR300EH3i7kmgRZnAVxrH+XtK09jQWihk=", + "owner": "gytis-ivaskevicius", + "repo": "flake-utils-plus", + "rev": "3542fe9126dc492e53ddd252bb0260fe035f2c0f", + "type": "github" + }, + "original": { + "owner": "gytis-ivaskevicius", + "repo": "flake-utils-plus", + "rev": "3542fe9126dc492e53ddd252bb0260fe035f2c0f", + "type": "github" + } + }, "flake-utils_2": { "inputs": { "systems": "systems_2" }, + "locked": { + "lastModified": 1694529238, + "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_3": { + "inputs": { + "systems": "systems_3" + }, + "locked": { + "lastModified": 1694529238, + "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_4": { + "inputs": { + "systems": "systems_4" + }, "locked": { "lastModified": 1705309234, "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", @@ -36,13 +219,49 @@ "type": "github" } }, + "flake-utils_5": { + "inputs": { + "systems": "systems_5" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_6": { + "inputs": { + "systems": "systems_6" + }, + "locked": { + "lastModified": 1694529238, + "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, "nixpkgs": { "locked": { - "lastModified": 1717952948, - "narHash": "sha256-mJi4/gjiwQlSaxjA6AusXBN/6rQRaPCycR7bd8fydnQ=", + "lastModified": 1719956923, + "narHash": "sha256-nNJHJ9kfPdzYsCOlHOnbiiyKjZUW5sWbwx3cakg3/C4=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "2819fffa7fa42156680f0d282c60d81e8fb185b7", + "rev": "706eef542dec88cc0ed25b9075d3037564b2d164", "type": "github" }, "original": { @@ -54,11 +273,27 @@ }, "nixpkgs_2": { "locked": { - "lastModified": 1706487304, - "narHash": "sha256-LE8lVX28MV2jWJsidW13D2qrHU/RUUONendL2Q/WlJg=", + "lastModified": 1719707984, + "narHash": "sha256-RoxIr/fbndtuKqulGvNCcuzC6KdAib85Q8gXnjzA1dw=", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "7dca15289a1c2990efbe4680f0923ce14139b042", + "type": "github" + }, + "original": { + "owner": "nixos", + "ref": "nixos-24.05", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1718428119, + "narHash": "sha256-WdWDpNaq6u1IPtxtYHHWpl5BmabtpmLnMAx0RdJ/vo8=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "90f456026d284c22b3e3497be980b2e47d0b28ac", + "rev": "e6cea36f83499eb4e9cd184c8a8e823296b50ad5", "type": "github" }, "original": { @@ -68,24 +303,115 @@ "type": "github" } 
}, + "nixpkgs_4": { + "locked": { + "lastModified": 1719707984, + "narHash": "sha256-RoxIr/fbndtuKqulGvNCcuzC6KdAib85Q8gXnjzA1dw=", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "7dca15289a1c2990efbe4680f0923ce14139b042", + "type": "github" + }, + "original": { + "owner": "nixos", + "ref": "nixos-24.05", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_5": { + "locked": { + "lastModified": 1717281328, + "narHash": "sha256-evZPzpf59oNcDUXxh2GHcxHkTEG4fjae2ytWP85jXRo=", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "b3b2b28c1daa04fe2ae47c21bb76fd226eac4ca1", + "type": "github" + }, + "original": { + "owner": "nixos", + "ref": "nixos-24.05", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixsgx-flake": { + "inputs": { + "nixpkgs": "nixpkgs_2", + "snowfall-lib": "snowfall-lib" + }, + "locked": { + "lastModified": 1719923509, + "narHash": "sha256-3buuJSKCVT0o42jpreoflYA+Rlp/4eQKATEAY+pPeh8=", + "owner": "matter-labs", + "repo": "nixsgx", + "rev": "520ad6227523c5720468726f9e945cecdb7a37aa", + "type": "github" + }, + "original": { + "owner": "matter-labs", + "repo": "nixsgx", + "type": "github" + } + }, + "nixsgx-flake_2": { + "inputs": { + "nixpkgs": "nixpkgs_4", + "snowfall-lib": "snowfall-lib_2" + }, + "locked": { + "lastModified": 1719916365, + "narHash": "sha256-RzCFbGAHq6rTY4ctrmazGIx59qXtfrVfEnIe+L0leTo=", + "owner": "matter-labs", + "repo": "nixsgx", + "rev": "0309a20ee5bf12b7390aa6795409b448420e80f2", + "type": "github" + }, + "original": { + "owner": "matter-labs", + "repo": "nixsgx", + "type": "github" + } + }, + "nixsgx-flake_3": { + "inputs": { + "nixpkgs": "nixpkgs_5", + "snowfall-lib": "snowfall-lib_3" + }, + "locked": { + "lastModified": 1717758565, + "narHash": "sha256-yscuZ3ixjwTkqS6ew5cB3Uvy9e807szRlMoPSyQuRJM=", + "owner": "matter-labs", + "repo": "nixsgx", + "rev": "49a1ae79d92ccb6ed7cabfe5c5042b1399e3cd3e", + "type": "github" + }, + "original": { + "owner": "matter-labs", + "repo": "nixsgx", + "type": "github" + } + }, "root": { "inputs": { + "crane": "crane", "flake-utils": "flake-utils", "nixpkgs": "nixpkgs", - "rust-overlay": "rust-overlay" + "nixsgx-flake": "nixsgx-flake", + "rust-overlay": "rust-overlay", + "teepot-flake": "teepot-flake" } }, "rust-overlay": { "inputs": { - "flake-utils": "flake-utils_2", - "nixpkgs": "nixpkgs_2" + "nixpkgs": "nixpkgs_3" }, "locked": { - "lastModified": 1718072316, - "narHash": "sha256-p33h73iQ1HkLalCplV5MH0oP3HXRaH3zufnFqb5//ps=", + "lastModified": 1720059535, + "narHash": "sha256-h/O3PoV3KvQG4tC5UpANBZOsptAZCzEGiwyi+3oSpYc=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "bedc47af18fc41bb7d2edc2b212d59ca36253f59", + "rev": "8deeed2dfa21837c7792b46b6a9b2e73f97b472b", "type": "github" }, "original": { @@ -94,6 +420,101 @@ "type": "github" } }, + "rust-overlay_2": { + "inputs": { + "flake-utils": "flake-utils_4", + "nixpkgs": [ + "teepot-flake", + "nixsgx-flake", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1717985971, + "narHash": "sha256-24h/qKp0aeI+Ew13WdRF521kY24PYa5HOvw0mlrABjk=", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": "abfe5b3126b1b7e9e4daafc1c6478d17f0b584e7", + "type": "github" + }, + "original": { + "owner": "oxalica", + "repo": "rust-overlay", + "type": "github" + } + }, + "snowfall-lib": { + "inputs": { + "flake-compat": "flake-compat", + "flake-utils-plus": "flake-utils-plus", + "nixpkgs": [ + "nixsgx-flake", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1719005984, + "narHash": "sha256-mpFl3Jv4fKnn+5znYXG6SsBjfXHJdRG5FEqNSPx0GLA=", + "owner": "snowfallorg", + 
"repo": "lib", + "rev": "c6238c83de101729c5de3a29586ba166a9a65622", + "type": "github" + }, + "original": { + "owner": "snowfallorg", + "repo": "lib", + "type": "github" + } + }, + "snowfall-lib_2": { + "inputs": { + "flake-compat": "flake-compat_2", + "flake-utils-plus": "flake-utils-plus_2", + "nixpkgs": [ + "teepot-flake", + "nixsgx-flake", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1719005984, + "narHash": "sha256-mpFl3Jv4fKnn+5znYXG6SsBjfXHJdRG5FEqNSPx0GLA=", + "owner": "snowfallorg", + "repo": "lib", + "rev": "c6238c83de101729c5de3a29586ba166a9a65622", + "type": "github" + }, + "original": { + "owner": "snowfallorg", + "repo": "lib", + "type": "github" + } + }, + "snowfall-lib_3": { + "inputs": { + "flake-compat": "flake-compat_3", + "flake-utils-plus": "flake-utils-plus_3", + "nixpkgs": [ + "teepot-flake", + "vault-auth-tee-flake", + "nixsgx-flake", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1716675292, + "narHash": "sha256-7TFvVE4HR/b65/0AAhewYHEJzUXxIEJn82ow5bCkrDo=", + "owner": "snowfallorg", + "repo": "lib", + "rev": "5d6e9f235735393c28e1145bec919610b172a20f", + "type": "github" + }, + "original": { + "owner": "snowfallorg", + "repo": "lib", + "type": "github" + } + }, "systems": { "locked": { "lastModified": 1681028828, @@ -123,6 +544,121 @@ "repo": "default", "type": "github" } + }, + "systems_3": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_4": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_5": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_6": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "teepot-flake": { + "inputs": { + "crane": "crane_2", + "nixpkgs": [ + "teepot-flake", + "nixsgx-flake", + "nixpkgs" + ], + "nixsgx-flake": "nixsgx-flake_2", + "rust-overlay": "rust-overlay_2", + "snowfall-lib": [ + "teepot-flake", + "nixsgx-flake", + "snowfall-lib" + ], + "vault-auth-tee-flake": "vault-auth-tee-flake" + }, + "locked": { + "lastModified": 1720011517, + "narHash": "sha256-1oo9Z47CNdqDgtGNE1LC+6CQ+VXcy7TtFFnvifBnVLE=", + "owner": "matter-labs", + "repo": "teepot", + "rev": "8dadc1f76b7dd8a98be7781e8206fed5268dd0e6", + "type": "github" + }, + "original": { + "owner": "matter-labs", + "repo": "teepot", + "type": "github" + } + }, + "vault-auth-tee-flake": { + "inputs": { + "flake-utils": "flake-utils_5", + "nixpkgs": [ + "teepot-flake", + "nixsgx-flake", + "nixpkgs" + ], + "nixsgx-flake": "nixsgx-flake_3" + }, + 
"locked": { + "lastModified": 1718012107, + "narHash": "sha256-uKiUBaEOj9f3NCn6oTw5VqoZJxsTXSoAn2IWVB/LSS0=", + "owner": "matter-labs", + "repo": "vault-auth-tee", + "rev": "b10204436bc2fbad74c5716bd265fad74acc197c", + "type": "github" + }, + "original": { + "owner": "matter-labs", + "repo": "vault-auth-tee", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index 0287d4cf09d1..cc14faebfed5 100644 --- a/flake.nix +++ b/flake.nix @@ -1,229 +1,144 @@ ################################################################################################### # -# To build the rust components with this flake, run: -# $ nix build .#cargoDeps -# set `cargoHash` below to the result of the build -# then -# $ nix build .#zksync_server -# or -# $ nix build .#zksync_server.contract_verifier -# $ nix build .#zksync_server.external_node -# $ nix build .#zksync_server.server -# $ nix build .#zksync_server.snapshots_creator -# $ nix build .#zksync_server.block_reverter -# -# To enter the development shell, run: -# $ nix develop -# -# To vendor the dependencies manually, run: -# $ nix shell .#cargo-vendor -c cargo vendor --no-merge-sources +# see `README.md` in `etc/nix` # ################################################################################################### { description = "ZKsync-era"; + + nixConfig = { + extra-substituters = [ "https://attic.teepot.org/tee-pot" ]; + extra-trusted-public-keys = [ "tee-pot:SS6HcrpG87S1M6HZGPsfo7d1xJccCGev7/tXc5+I4jg=" ]; + }; + inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05"; + teepot-flake.url = "github:matter-labs/teepot"; + nixsgx-flake.url = "github:matter-labs/nixsgx"; flake-utils.url = "github:numtide/flake-utils"; rust-overlay.url = "github:oxalica/rust-overlay"; + crane = { + url = "github:ipetkov/crane?tag=v0.17.3"; + inputs.nixpkgs.follows = "nixpkgs"; + }; }; - outputs = { self, nixpkgs, flake-utils, rust-overlay }: - flake-utils.lib.eachDefaultSystem (system: - let - ########################################################################################### - # This changes every time `Cargo.lock` changes. 
Set to `null` to force re-vendoring - cargoHash = null; - # cargoHash = "sha256-LloF3jrvFkOlZ2lQXB+/sFthfJQLLu8BvHBE88gRvFc="; - ########################################################################################### - officialRelease = false; - - versionSuffix = - if officialRelease - then "" - else "pre${builtins.substring 0 8 (self.lastModifiedDate or self.lastModified or "19700101")}_${self.shortRev or "dirty"}"; - - pkgs = import nixpkgs { inherit system; overlays = [ rust-overlay.overlays.default ]; }; - - # patched version of cargo to support `cargo vendor` for vendoring dependencies - # see https://github.com/matter-labs/zksync-era/issues/1086 - # used as `cargo vendor --no-merge-sources` - cargo-vendor = pkgs.rustPlatform.buildRustPackage { - pname = "cargo-vendor"; - version = "0.78.0"; - src = pkgs.fetchFromGitHub { - owner = "haraldh"; - repo = "cargo"; - rev = "3ee1557d2bd95ca9d0224c5dbf1d1e2d67186455"; - hash = "sha256-A8xrOG+NmF8dQ7tA9I2vJSNHlYxsH44ZRXdptLblCXk="; + + outputs = { self, nixpkgs, teepot-flake, nixsgx-flake, flake-utils, rust-overlay, crane }: + let + officialRelease = false; + hardeningEnable = [ "fortify3" "pie" "relro" ]; + + out = system: + let + pkgs = import nixpkgs { + inherit system; + overlays = [ + rust-overlay.overlays.default + nixsgx-flake.overlays.default + teepot-flake.overlays.default + ]; }; - doCheck = false; - cargoHash = "sha256-LtuNtdoX+FF/bG5LQc+L2HkFmgCtw5xM/m0/0ShlX2s="; - nativeBuildInputs = [ - pkgs.pkg-config - pkgs.rustPlatform.bindgenHook - ]; - buildInputs = [ - pkgs.openssl - ]; - }; - # custom import-cargo-lock to import Cargo.lock file and vendor dependencies - # see https://github.com/matter-labs/zksync-era/issues/1086 - import-cargo-lock = { lib, cacert, runCommand }: { src, cargoHash ? 
null }: - runCommand "import-cargo-lock" - { - inherit src; - nativeBuildInputs = [ cargo-vendor cacert ]; - preferLocalBuild = true; - outputHashMode = "recursive"; - outputHashAlgo = "sha256"; - outputHash = if cargoHash != null then cargoHash else lib.fakeSha256; - } - '' - mkdir -p $out/.cargo - mkdir -p $out/cargo-vendor-dir - - HOME=$(pwd) - pushd ${src} - HOME=$HOME cargo vendor --no-merge-sources $out/cargo-vendor-dir > $out/.cargo/config - sed -i -e "s#$out#import-cargo-lock#g" $out/.cargo/config - cp $(pwd)/Cargo.lock $out/Cargo.lock - popd - '' - ; - cargoDeps = pkgs.buildPackages.callPackage import-cargo-lock { } { inherit src; inherit cargoHash; }; - - rustVersion = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain; - - stdenv = pkgs.stdenvAdapters.useMoldLinker pkgs.clangStdenv; - - rustPlatform = pkgs.makeRustPlatform { - cargo = rustVersion; - rustc = rustVersion; - inherit stdenv; - }; - zksync_server_cargoToml = builtins.fromTOML (builtins.readFile ./core/bin/zksync_server/Cargo.toml); - - hardeningEnable = [ "fortify3" "pie" "relro" ]; - - src = with pkgs.lib.fileset; toSource { - root = ./.; - fileset = unions [ - ./Cargo.lock - ./Cargo.toml - ./core - ./prover - ./.github/release-please/manifest.json - ]; - }; + appliedOverlay = self.overlays.default pkgs pkgs; + in + { + formatter = pkgs.nixpkgs-fmt; - zksync_server = with pkgs; stdenv.mkDerivation { - pname = "zksync"; - version = zksync_server_cargoToml.package.version + versionSuffix; - - updateAutotoolsGnuConfigScriptsPhase = ":"; - - nativeBuildInputs = [ - pkg-config - rustPlatform.bindgenHook - rustPlatform.cargoSetupHook - rustPlatform.cargoBuildHook - rustPlatform.cargoInstallHook - ]; - - buildInputs = [ - libclang - openssl - snappy.dev - lz4.dev - bzip2.dev - ]; - - inherit src; - cargoBuildFlags = "--all"; - cargoBuildType = "release"; - - inherit cargoDeps; - - inherit hardeningEnable; - - outputs = [ - "out" - "contract_verifier" - "external_node" - "server" - "snapshots_creator" - "block_reverter" - ]; - - postInstall = '' - mkdir -p $out/nix-support - for i in $outputs; do - [[ $i == "out" ]] && continue - mkdir -p "''${!i}/bin" - echo "''${!i}" >> $out/nix-support/propagated-user-env-packages - if [[ -e "$out/bin/zksync_$i" ]]; then - mv "$out/bin/zksync_$i" "''${!i}/bin" - else - mv "$out/bin/$i" "''${!i}/bin" - fi - done - - mkdir -p $external_node/nix-support - echo "block_reverter" >> $external_node/nix-support/propagated-user-env-packages - - mv $out/bin/merkle_tree_consistency_checker $server/bin - mkdir -p $server/nix-support - echo "block_reverter" >> $server/nix-support/propagated-user-env-packages - ''; - }; - in - { - formatter = pkgs.nixpkgs-fmt; - - packages = { - inherit zksync_server; - default = zksync_server; - inherit cargo-vendor; - inherit cargoDeps; + packages = { + # to ease potential cross-compilation, the overlay is used + inherit (appliedOverlay.zksync-era) zksync tee_prover container-tee-prover-azure container-tee-prover-dcap; + default = appliedOverlay.zksync-era.zksync; + }; + + devShells.default = appliedOverlay.zksync-era.devShell; }; + in + flake-utils.lib.eachDefaultSystem out // { + overlays.default = final: prev: + # to ease potential cross-compilation, the overlay is used + let + pkgs = final; + + rustVersion = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain; + + rustPlatform = pkgs.makeRustPlatform { + cargo = rustVersion; + rustc = rustVersion; + }; - devShells = with pkgs; { - default = pkgs.mkShell.override { inherit stdenv; } { - inputsFrom = [ 
zksync_server ]; - - packages = [ - docker-compose - nodejs - yarn - axel - postgresql - python3 - solc - sqlx-cli - mold + craneLib = (crane.mkLib pkgs).overrideToolchain rustVersion; + NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa"; + + commonArgs = { + nativeBuildInputs = with pkgs;[ + pkg-config + rustPlatform.bindgenHook + ]; + + buildInputs = with pkgs;[ + libclang.dev + openssl.dev + snappy.dev + lz4.dev + bzip2.dev ]; + src = with pkgs.lib.fileset; toSource { + root = ./.; + fileset = unions [ + ./Cargo.lock + ./Cargo.toml + ./core + ./prover + ./zk_toolbox + ./.github/release-please/manifest.json + ]; + }; + + env = { + OPENSSL_NO_VENDOR = "1"; + inherit NIX_OUTPATH_USED_AS_RANDOM_SEED; + }; + + doCheck = false; + strictDeps = true; inherit hardeningEnable; + }; - shellHook = '' - export ZKSYNC_HOME=$PWD - export PATH=$ZKSYNC_HOME/bin:$PATH - export RUSTFLAGS='-C link-arg=-fuse-ld=${pkgs.mold}/bin/mold' - export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="clang" - - if [ "x$NIX_LD" = "x" ]; then - export NIX_LD="$(<${clangStdenv.cc}/nix-support/dynamic-linker)" - fi - if [ "x$NIX_LD_LIBRARY_PATH" = "x" ]; then - export NIX_LD_LIBRARY_PATH="$ZK_NIX_LD_LIBRARY_PATH" - else - export NIX_LD_LIBRARY_PATH="$NIX_LD_LIBRARY_PATH:$ZK_NIX_LD_LIBRARY_PATH" - fi - ''; - - ZK_NIX_LD_LIBRARY_PATH = lib.makeLibraryPath [ ]; + cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { + pname = "zksync-era-workspace"; + }); + in + { + zksync-era = rec { + devShell = pkgs.callPackage ./etc/nix/devshell.nix { + inherit zksync; + inherit commonArgs; + }; + + zksync = pkgs.callPackage ./etc/nix/zksync.nix { + inherit cargoArtifacts; + inherit craneLib; + inherit commonArgs; + }; + tee_prover = pkgs.callPackage ./etc/nix/tee_prover.nix { + inherit cargoArtifacts; + inherit craneLib; + inherit commonArgs; + }; + + container-tee-prover-azure = pkgs.callPackage ./etc/nix/container-tee_prover.nix { + inherit tee_prover; + isAzure = true; + container-name = "zksync-tee-prover-azure"; + }; + container-tee-prover-dcap = pkgs.callPackage ./etc/nix/container-tee_prover.nix { + inherit tee_prover; + isAzure = false; + container-name = "zksync-tee-prover-dcap"; + }; }; }; - }); + }; } diff --git a/infrastructure/protocol-upgrade/src/hyperchain-upgrade.ts b/infrastructure/protocol-upgrade/src/hyperchain-upgrade.ts index 102c67e509fe..027108e456c2 100644 --- a/infrastructure/protocol-upgrade/src/hyperchain-upgrade.ts +++ b/infrastructure/protocol-upgrade/src/hyperchain-upgrade.ts @@ -37,6 +37,7 @@ async function hyperchainUpgrade1() { 'CONTRACTS_HYPERCHAIN_UPGRADE_ADDR', 'CONTRACTS_GENESIS_UPGRADE_ADDR', 'CONTRACTS_GOVERNANCE_ADDR', + 'CONTRACTS_CHAIN_ADMIN_ADDR', 'CONTRACTS_ADMIN_FACET_ADDR', 'CONTRACTS_EXECUTOR_FACET_ADDR', 'CONTRACTS_GETTERS_FACET_ADDR', diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index 604e98ee3bf6..4aaed4186d75 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -1,5 +1,4 @@ -import { BigNumberish } from '@ethersproject/bignumber'; -import { BytesLike, ethers } from 'ethers'; +import { BytesLike, ethers, BigNumberish } from 'ethers'; import { ForceDeployUpgraderFactory as ForceDeployUpgraderFactoryL2 } from 'l2-contracts/typechain'; import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1, diff --git a/infrastructure/zk/package.json b/infrastructure/zk/package.json index dc6aded093a2..29d47184fa09 100644 --- a/infrastructure/zk/package.json +++ 
b/infrastructure/zk/package.json @@ -31,7 +31,6 @@ "@types/tabtab": "^3.0.1", "hardhat": "=2.22.2", "typescript": "^4.3.5", - "cspell": "^8.3.2", "sql-formatter": "^13.1.0" } } diff --git a/infrastructure/zk/src/config.ts b/infrastructure/zk/src/config.ts index 5ca7fb1ce59f..d1ffc5fa3f0c 100644 --- a/infrastructure/zk/src/config.ts +++ b/infrastructure/zk/src/config.ts @@ -134,7 +134,7 @@ export function pushConfig(environment?: string, diff?: string) { env.modify('API_WEB3_JSON_RPC_HTTP_URL', `http://127.0.0.1:${3050 + 2 * difference}`, l2InitFile, false); env.modify('API_WEB3_JSON_RPC_WS_PORT', `${3050 + 1 + 2 * difference}`, l2InitFile, false); - env.modify('API_WEB3_JSON_RPC_WS_URL', `http://127.0.0.1:${3050 + 1 + 2 * difference}`, l2InitFile, false); + env.modify('API_WEB3_JSON_RPC_WS_URL', `ws://127.0.0.1:${3050 + 1 + 2 * difference}`, l2InitFile, false); env.modify('API_EXPLORER_PORT', `${3070 + 2 * difference}`, l2InitFile, false); env.modify('API_EXPLORER_URL', `http://127.0.0.1:${3070 + 2 * difference}`, l2InitFile, false); diff --git a/infrastructure/zk/src/contract.ts b/infrastructure/zk/src/contract.ts index a76da74b01ef..b9b4a1861c0c 100644 --- a/infrastructure/zk/src/contract.ts +++ b/infrastructure/zk/src/contract.ts @@ -155,6 +155,7 @@ async function _deployL1(onlyVerifier: boolean): Promise { 'CONTRACTS_DEFAULT_UPGRADE_ADDR', 'CONTRACTS_GENESIS_UPGRADE_ADDR', 'CONTRACTS_GOVERNANCE_ADDR', + 'CONTRACTS_CHAIN_ADMIN_ADDR', 'CONTRACTS_ADMIN_FACET_ADDR', 'CONTRACTS_EXECUTOR_FACET_ADDR', 'CONTRACTS_GETTERS_FACET_ADDR', diff --git a/infrastructure/zk/src/database.ts b/infrastructure/zk/src/database.ts index 2d11bca447d2..c818bd3be93f 100644 --- a/infrastructure/zk/src/database.ts +++ b/infrastructure/zk/src/database.ts @@ -10,7 +10,7 @@ export async function reset(opts: DbOpts) { export enum DalPath { CoreDal = 'core/lib/dal', - ProverDal = 'prover/prover_dal' + ProverDal = 'prover/crates/lib/prover_dal' } export interface DbOpts { diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 7f42fca1d022..19b03bcb2111 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -75,13 +75,7 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin 'server-v2', 'external-node', 'contract-verifier', - 'witness-generator', - 'prover-fri', - 'prover-gpu-fri', - 'witness-vector-generator', 'prover-fri-gateway', - 'proof-fri-compressor', - 'proof-fri-gpu-compressor', 'snapshots-creator' ].includes(image) ? 
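// For the core images in the allow-list above, publish both plain and
// 2.0-prefixed tags derived from the commit SHA and its timestamp, plus the
// floating `latest`/`latest2.0` tags: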
['latest', 'latest2.0', `2.0-${imageTagSha}`, `${imageTagSha}`, `2.0-${imageTagShaTS}`, `${imageTagShaTS}`] diff --git a/infrastructure/zk/src/format_sql.ts b/infrastructure/zk/src/format_sql.ts index 7f18d4a46388..09f655f54867 100644 --- a/infrastructure/zk/src/format_sql.ts +++ b/infrastructure/zk/src/format_sql.ts @@ -159,7 +159,7 @@ async function formatFile(filePath: string, check: boolean) { export async function formatSqlxQueries(check: boolean) { process.chdir(`${process.env.ZKSYNC_HOME}`); const { stdout: filesRaw } = await utils.exec( - 'find core/lib/dal -type f -name "*.rs" && find prover/prover_dal -type f -name "*.rs"' + 'find core/lib/dal -type f -name "*.rs" && find prover/crates/lib/prover_dal -type f -name "*.rs"' ); const files = filesRaw.trim().split('\n'); const formatResults = await Promise.all(files.map((file) => formatFile(file, check))); diff --git a/infrastructure/zk/src/index.ts b/infrastructure/zk/src/index.ts index 0c11c110c6e3..5aef41cca388 100644 --- a/infrastructure/zk/src/index.ts +++ b/infrastructure/zk/src/index.ts @@ -23,7 +23,6 @@ import { command as db } from './database'; import { command as verifyUpgrade } from './verify-upgrade'; import { proverCommand } from './prover_setup'; import { command as status } from './status'; -import { command as spellcheck } from './spellcheck'; import { command as setupEn } from './setup_en'; import * as env from './env'; @@ -50,7 +49,6 @@ const COMMANDS = [ proverCommand, env.command, status, - spellcheck, setupEn, completion(program as Command) ]; diff --git a/infrastructure/zk/src/lint.ts b/infrastructure/zk/src/lint.ts index 84c2c4535c59..7a24881c0f96 100644 --- a/infrastructure/zk/src/lint.ts +++ b/infrastructure/zk/src/lint.ts @@ -35,7 +35,7 @@ async function clippy() { async function proverClippy() { process.chdir(`${process.env.ZKSYNC_HOME}/prover`); - await utils.spawn('cargo clippy --tests --locked -- -D warnings -A incomplete_features'); + await utils.spawn('cargo clippy --tests --locked -- -D warnings'); } async function toolboxClippy() { diff --git a/infrastructure/zk/src/server.ts b/infrastructure/zk/src/server.ts index 872aff2eb5c3..8b10559361ae 100644 --- a/infrastructure/zk/src/server.ts +++ b/infrastructure/zk/src/server.ts @@ -14,16 +14,9 @@ export async function server(rebuildTree: boolean, uring: boolean, components?: if (rebuildTree || components || useNodeFramework) { options += ' --'; } - if (rebuildTree) { - clean('db'); - options += ' --rebuild-tree'; - } if (components) { options += ` --components=${components}`; } - if (useNodeFramework) { - options += ' --use-node-framework'; - } await utils.spawn(`cargo run --bin zksync_server --release ${options}`); } @@ -78,11 +71,9 @@ export async function genesisFromBinary() { export const serverCommand = new Command('server') .description('start zksync server') .option('--genesis', 'generate genesis data via server') - .option('--rebuild-tree', 'rebuilds merkle tree from database logs', 'rebuild_tree') .option('--uring', 'enables uring support for RocksDB') .option('--components ', 'comma-separated list of components to run') .option('--chain-name ', 'environment name') - .option('--use-node-framework', 'use node framework for server') .action(async (cmd: Command) => { cmd.chainName ? 
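// If a chain name was passed, reload that environment; otherwise load the
// currently selected one.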
env.reload(cmd.chainName) : env.load(); if (cmd.genesis) { diff --git a/infrastructure/zk/src/spellcheck.ts b/infrastructure/zk/src/spellcheck.ts deleted file mode 100644 index 8bf78869788d..000000000000 --- a/infrastructure/zk/src/spellcheck.ts +++ /dev/null @@ -1,44 +0,0 @@ -import { Command } from 'commander'; -import * as utils from 'utils'; - -export async function runSpellCheck(pattern: string, useCargo: boolean, useCSpell: boolean) { - // Default commands for cSpell and cargo spellcheck - const cSpellCommand = `cspell "${pattern}" --config=./checks-config/cspell.json`; - const cargoCommand = `cargo spellcheck --cfg=./checks-config/era.cfg --code 1`; - // Necessary to run cargo spellcheck in the prover directory explicitly as - // it is not included in the root cargo.toml file - const cargoCommandForProver = `cargo spellcheck --cfg=../checks-config/era.cfg --code 1`; - - try { - let results = []; - - // Run cspell over all **/*.md files - if (useCSpell || (!useCargo && !useCSpell)) { - results.push(await utils.spawn(cSpellCommand)); - } - - // Run cargo spellcheck in core and prover directories - if (useCargo || (!useCargo && !useCSpell)) { - results.push(await utils.spawn(cargoCommand)); - results.push(await utils.spawn('cd prover && ' + cargoCommandForProver)); - } - - // Check results and exit with error code if any command failed - if (results.some((code) => code !== 0)) { - console.error('Spell check failed'); - process.exit(1); - } - } catch (error) { - console.error('Error occurred during spell checking:', error); - process.exit(1); - } -} - -export const command = new Command('spellcheck') - .option('--pattern ', 'Glob pattern for files to check', '**/*.md') - .option('--use-cargo', 'Use cargo spellcheck') - .option('--use-cspell', 'Use cspell') - .description('Run spell check on specified files') - .action((cmd) => { - runSpellCheck(cmd.pattern, cmd.useCargo, cmd.useCSpell); - }); diff --git a/package.json b/package.json index b15675264d3e..af745160c30d 100644 --- a/package.json +++ b/package.json @@ -37,18 +37,16 @@ "zk": "yarn workspace zk" }, "devDependencies": { - "@ethersproject/bignumber": "~5.5.0", "@typescript-eslint/eslint-plugin": "^6.7.4", "@typescript-eslint/parser": "^4.10.0", "babel-eslint": "^10.1.0", - "eslint": "^7.16.0", "eslint-config-alloy": "^3.8.2", + "eslint": "^7.16.0", "markdownlint-cli": "^0.24.0", "npm-run-all": "^4.1.5", - "prettier": "^2.3.2", "prettier-plugin-solidity": "=1.0.0-dev.22", + "prettier": "^2.3.2", "solhint": "^3.3.2", - "sql-formatter": "^13.1.0", - "zksync-ethers": "5.8.0-beta.5" + "sql-formatter": "^13.1.0" } } diff --git a/prover/CHANGELOG.md b/prover/CHANGELOG.md index ea16d1cfa453..dc9bb315cb10 100644 --- a/prover/CHANGELOG.md +++ b/prover/CHANGELOG.md @@ -1,5 +1,69 @@ # Changelog +## [16.1.0](https://github.com/matter-labs/zksync-era/compare/prover-v16.0.0...prover-v16.1.0) (2024-07-24) + + +### Features + +* **prover:** Make it possible to run prover out of GCP ([#2448](https://github.com/matter-labs/zksync-era/issues/2448)) ([c9da549](https://github.com/matter-labs/zksync-era/commit/c9da5497e2aa9d85f204ab7b74fefcfe941793ff)) +* remove leftovers after BWIP ([#2456](https://github.com/matter-labs/zksync-era/issues/2456)) ([990676c](https://github.com/matter-labs/zksync-era/commit/990676c5f84afd2ff8cd337f495c82e8d1f305a4)) + + +### Bug Fixes + +* **prover:** BWG optimizations ([#2469](https://github.com/matter-labs/zksync-era/issues/2469)) 
([d8851c8](https://github.com/matter-labs/zksync-era/commit/d8851c8af2cd4b595f4edb9c36c81e2310835a77)) + +## [16.0.0](https://github.com/matter-labs/zksync-era/compare/prover-v15.1.0...prover-v16.0.0) (2024-07-11) + + +### ⚠ BREAKING CHANGES + +* **prover:** Bump prover protocol patch ([#2428](https://github.com/matter-labs/zksync-era/issues/2428)) + +### Features + +* L1 batch signing (BFT-474) ([#2414](https://github.com/matter-labs/zksync-era/issues/2414)) ([ab699db](https://github.com/matter-labs/zksync-era/commit/ab699dbe8cffa8bd291d6054579061b47fd4aa0e)) +* **prover:** Bump prover protocol patch ([#2428](https://github.com/matter-labs/zksync-era/issues/2428)) ([1dffae9](https://github.com/matter-labs/zksync-era/commit/1dffae90d0d6a56434bb076135ac2a957ab20b83)) + +## [15.1.0](https://github.com/matter-labs/zksync-era/compare/prover-v15.0.0...prover-v15.1.0) (2024-07-10) + + +### Features + +* **api:** Retry `read_value` ([#2352](https://github.com/matter-labs/zksync-era/issues/2352)) ([256a43c](https://github.com/matter-labs/zksync-era/commit/256a43cdd01619b89e348419bc361454ba4fdabb)) +* Base Token Fundamentals ([#2204](https://github.com/matter-labs/zksync-era/issues/2204)) ([39709f5](https://github.com/matter-labs/zksync-era/commit/39709f58071ac77bfd447145e1c3342b7da70560)) +* BWIP ([#2258](https://github.com/matter-labs/zksync-era/issues/2258)) ([75bdfcc](https://github.com/matter-labs/zksync-era/commit/75bdfcc0ef4a99d93ac152db12a59ef2b2af0d27)) +* change `zkSync` occurrences to `ZKsync` ([#2227](https://github.com/matter-labs/zksync-era/issues/2227)) ([0b4104d](https://github.com/matter-labs/zksync-era/commit/0b4104dbb996ec6333619ea05f3a99e6d4f3b8fa)) +* **config:** Make gateway_url optional ([#2412](https://github.com/matter-labs/zksync-era/issues/2412)) ([200bc82](https://github.com/matter-labs/zksync-era/commit/200bc825032b18ad9d8f3f49d4eb7cb0e1b5b645)) +* consensus support for pruning (BFT-473) ([#2334](https://github.com/matter-labs/zksync-era/issues/2334)) ([abc4256](https://github.com/matter-labs/zksync-era/commit/abc4256570b899e2b47ed8362e69ae0150247490)) +* **contract-verifier:** Add file based config for contract verifier ([#2415](https://github.com/matter-labs/zksync-era/issues/2415)) ([f4410e3](https://github.com/matter-labs/zksync-era/commit/f4410e3254dafdfe400e1c2c420f664ba951e2cd)) +* **en:** file based configs for en ([#2110](https://github.com/matter-labs/zksync-era/issues/2110)) ([7940fa3](https://github.com/matter-labs/zksync-era/commit/7940fa32a27ee4de43753c7083f92ca8c2ebe86b)) +* Make all core workspace crate names start with zksync_ ([#2294](https://github.com/matter-labs/zksync-era/issues/2294)) ([8861f29](https://github.com/matter-labs/zksync-era/commit/8861f2994b674be3c654511416452c0a555d0f73)) +* Minimal External API Fetcher ([#2383](https://github.com/matter-labs/zksync-era/issues/2383)) ([9f255c0](https://github.com/matter-labs/zksync-era/commit/9f255c073cfdab60832fcf9a6d3a4a9258641ef3)) +* **prover:** Add file based config for compressor ([#2353](https://github.com/matter-labs/zksync-era/issues/2353)) ([1d6f87d](https://github.com/matter-labs/zksync-era/commit/1d6f87dde88ee1b09e42d57a8d285eb257068bae)) +* **prover:** Add file based config for prover fri ([#2184](https://github.com/matter-labs/zksync-era/issues/2184)) ([f851615](https://github.com/matter-labs/zksync-era/commit/f851615ab3753bb9353fd4456a6e49d55d67c626)) +* **prover:** Add file based config for witness vector generator ([#2337](https://github.com/matter-labs/zksync-era/issues/2337))
([f86eb13](https://github.com/matter-labs/zksync-era/commit/f86eb132aa2f5b75c45a65189e9664d3d1e2682f)) +* **prover:** Add file based config support for vk-setup-data-generator-server-fri ([#2371](https://github.com/matter-labs/zksync-era/issues/2371)) ([b0e72c9](https://github.com/matter-labs/zksync-era/commit/b0e72c9ecbb659850f7dd27386984b99877e7a5c)) +* **prover:** Add prometheus port to witness generator config ([#2385](https://github.com/matter-labs/zksync-era/issues/2385)) ([d0e1add](https://github.com/matter-labs/zksync-era/commit/d0e1addfccf6b5d3b21facd6bb74455f098f0177)) +* **prover:** Add prover_cli stats command ([#2362](https://github.com/matter-labs/zksync-era/issues/2362)) ([fe65319](https://github.com/matter-labs/zksync-era/commit/fe65319da0f26ca45e95f067c1e8b97cf7874c45)) +* Remove cached commitments, add BWIP to docs ([#2400](https://github.com/matter-labs/zksync-era/issues/2400)) ([e652e4d](https://github.com/matter-labs/zksync-era/commit/e652e4d8548570d060fa4c901c75745b7ea6b296)) +* Remove initialize_components function ([#2284](https://github.com/matter-labs/zksync-era/issues/2284)) ([0a38891](https://github.com/matter-labs/zksync-era/commit/0a388911914bfcf58785e394db9d5ddce3afdef0)) +* snark proof is already verified inside wrap_proof function ([#1903](https://github.com/matter-labs/zksync-era/issues/1903)) ([2c8cf35](https://github.com/matter-labs/zksync-era/commit/2c8cf35bc1b03f82073bad9e28ebb409d48bad98)) +* Switch to using crates.io deps ([#2409](https://github.com/matter-labs/zksync-era/issues/2409)) ([27fabaf](https://github.com/matter-labs/zksync-era/commit/27fabafbec66bf4cb65c4fa9e3fab4c3c981d0f2)) +* **tee:** TEE Prover Gateway ([#2333](https://github.com/matter-labs/zksync-era/issues/2333)) ([f8df34d](https://github.com/matter-labs/zksync-era/commit/f8df34d9bff5e165fe40d4f67afa582a84038303)) +* upgraded encoding of transactions in consensus Payload. 
([#2245](https://github.com/matter-labs/zksync-era/issues/2245)) ([cb6a6c8](https://github.com/matter-labs/zksync-era/commit/cb6a6c88de54806d0f4ae4af7ea873a911605780)) +* Validium with DA ([#2010](https://github.com/matter-labs/zksync-era/issues/2010)) ([fe03d0e](https://github.com/matter-labs/zksync-era/commit/fe03d0e254a98fea60ecb7485a7de9e7fdecaee1)) +* **zk_toolbox:** Add prover run ([#2272](https://github.com/matter-labs/zksync-era/issues/2272)) ([598ef7b](https://github.com/matter-labs/zksync-era/commit/598ef7b73cf141007d2cf031b21fce4744eec44f)) + + +### Bug Fixes + +* Fix rustls setup for jsonrpsee clients ([#2417](https://github.com/matter-labs/zksync-era/issues/2417)) ([a040f09](https://github.com/matter-labs/zksync-era/commit/a040f099cd9863d47d49cbdb3360e53a82e0423e)) +* **proof_compressor:** Fix backward compatibility ([#2356](https://github.com/matter-labs/zksync-era/issues/2356)) ([76508c4](https://github.com/matter-labs/zksync-era/commit/76508c42e83770ee50a0a9ced03b437687d383cd)) +* prover Cargo.lock ([#2280](https://github.com/matter-labs/zksync-era/issues/2280)) ([05c6f35](https://github.com/matter-labs/zksync-era/commit/05c6f357eee591262e3ddd870fcde0fe50ce05cc)) +* **prover_cli:** Fix Minor Bugs in Prover CLI ([#2264](https://github.com/matter-labs/zksync-era/issues/2264)) ([440f2a7](https://github.com/matter-labs/zksync-era/commit/440f2a7ae0def22bab65c4bb5c531b3234841b76)) +* **prover_cli:** Remove outdated fix for circuit id in node wg ([#2248](https://github.com/matter-labs/zksync-era/issues/2248)) ([db8e71b](https://github.com/matter-labs/zksync-era/commit/db8e71b55393b3d0e419886b62712b61305ac030)) + ## [15.0.0](https://github.com/matter-labs/zksync-era/compare/prover-v14.5.0...prover-v15.0.0) (2024-06-14) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index d909f156d8f9..8d705371bf33 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -28,41 +28,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array", -] - -[[package]] -name = "aes" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" -dependencies = [ - "cfg-if 1.0.0", - "cipher", - "cpufeatures", -] - -[[package]] -name = "aes-gcm" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" -dependencies = [ - "aead", - "aes", - "cipher", - "ctr", - "ghash", - "subtle", -] - [[package]] name = "ahash" version = "0.7.8" @@ -96,21 +61,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "alloc-no-stdlib" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" - -[[package]] -name = "alloc-stdlib" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" -dependencies = [ - "alloc-no-stdlib", -] - [[package]] name = "allocator-api2" version = "0.2.18" @@ -244,33 +194,6 @@ version = "0.7.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" -[[package]] -name = "async-compression" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" -dependencies = [ - "brotli", - "flate2", - "futures-core", - "memchr", - "pin-project-lite", - "tokio", - "zstd", - "zstd-safe", -] - -[[package]] -name = "async-lock" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" -dependencies = [ - "event-listener 5.3.1", - "event-listener-strategy", - "pin-project-lite", -] - [[package]] name = "async-stream" version = "0.3.5" @@ -313,6 +236,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "atty" version = "0.2.14" @@ -330,6 +259,33 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +[[package]] +name = "aws-lc-rs" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a47f2fb521b70c11ce7369a6c5fa4bd6af7e5d62ec06303875bafe7c6ba245" +dependencies = [ + "aws-lc-sys", + "mirai-annotations", + "paste", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2927c7af777b460b7ccd95f8b67acd7b4c04ec8896bf0c8e80ba30523cffc057" +dependencies = [ + "bindgen 0.69.4", + "cc", + "cmake", + "dunce", + "fs_extra", + "libc", + "paste", +] + [[package]] name = "axum" version = "0.6.20" @@ -341,9 +297,9 @@ dependencies = [ "bitflags 1.3.2", "bytes", "futures-util", - "http", - "http-body", - "hyper", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.29", "itoa", "matchit", "memchr", @@ -352,11 +308,7 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "serde_json", - "serde_path_to_error", - "serde_urlencoded", - "sync_wrapper", - "tokio", + "sync_wrapper 0.1.2", "tower", "tower-layer", "tower-service", @@ -371,14 +323,26 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 0.2.12", + "http-body 0.4.6", "mime", "rustversion", "tower-layer", "tower-service", ] +[[package]] +name = "backon" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d67782c3f868daa71d3533538e98a8e13713231969def7536e8039606fc46bf0" +dependencies = [ + "fastrand", + "futures-core", + "pin-project", + "tokio", +] + [[package]] name = "backtrace" version = "0.3.72" @@ -441,12 +405,13 @@ dependencies = [ [[package]] name = "bellman_ce" -version = "0.3.2" -source = "git+https://github.com/matter-labs/bellman?branch=dev#5520aa2274afe73d281373c92b007a2ecdebfbea" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ea340d5c1394ee4daf4415dd80e06f74e0ad9b08e21f73f6bb1fa3a9dfae80d" dependencies = [ "arrayvec 0.7.4", "bit-vec", - "blake2s_const 0.6.0 (git+https://github.com/matter-labs/bellman?branch=dev)", + "blake2s_const 0.7.0", "blake2s_simd", "byteorder", "cfg-if 1.0.0", @@ -455,7 +420,7 @@ dependencies = [ "hex", "lazy_static", "num_cpus", - "pairing_ce 
0.28.5 (registry+https://github.com/rust-lang/crates.io-index)", + "pairing_ce", "rand 0.4.6", "serde", "smallvec", @@ -464,12 +429,13 @@ dependencies = [ [[package]] name = "bellman_ce" -version = "0.3.2" -source = "git+https://github.com/matter-labs/bellman?branch=snark-wrapper#e01e5fa08a97a113e76ec8a69d06fe6cc2c82d17" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aab6627603565b664e6c643a1dc7ea8bbff25b776f5fecd80ac88308fc7007b" dependencies = [ "arrayvec 0.7.4", "bit-vec", - "blake2s_const 0.6.0 (git+https://github.com/matter-labs/bellman?branch=snark-wrapper)", + "blake2s_const 0.8.0", "blake2s_simd", "byteorder", "cfg-if 1.0.0", @@ -478,7 +444,7 @@ dependencies = [ "hex", "lazy_static", "num_cpus", - "pairing_ce 0.28.5 (registry+https://github.com/rust-lang/crates.io-index)", + "pairing_ce", "rand 0.4.6", "serde", "smallvec", @@ -605,15 +571,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bitmaps" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2" -dependencies = [ - "typenum", -] - [[package]] name = "bitvec" version = "1.0.1" @@ -646,14 +603,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "blake2" -version = "0.10.6" -source = "git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e#1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e" -dependencies = [ - "digest 0.10.7", -] - [[package]] name = "blake2-rfc_bellman_edition" version = "0.0.1" @@ -665,10 +614,20 @@ dependencies = [ "constant_time_eq", ] +[[package]] +name = "blake2_ce" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90cef65f11dd09a6c58914148161dbf190e5dcc02c87ed2aa47b3b97d3e7ce76" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "blake2s_const" -version = "0.6.0" -source = "git+https://github.com/matter-labs/bellman?branch=dev#5520aa2274afe73d281373c92b007a2ecdebfbea" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f39d933cb38939f885001867874c65699c36f30f0c78aae9f4c9f01b3e4b306a" dependencies = [ "arrayref", "arrayvec 0.5.2", @@ -677,8 +636,9 @@ dependencies = [ [[package]] name = "blake2s_const" -version = "0.6.0" -source = "git+https://github.com/matter-labs/bellman?branch=snark-wrapper#e01e5fa08a97a113e76ec8a69d06fe6cc2c82d17" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1db04f0f5f88d8c95977159949b23d2ed24d33309901cf7f7e48ed40f36de667" dependencies = [ "arrayref", "arrayvec 0.5.2", @@ -736,16 +696,17 @@ dependencies = [ [[package]] name = "boojum" version = "0.2.0" -source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#4bcb11f0610302110ae8109af01d5b652191b2f6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f0c2cba247d620ff76123efb335401aa05ec5639551e6ef4e5f977c0809b5cb" dependencies = [ "arrayvec 0.7.4", "bincode", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2 0.10.6", "const_format", "convert_case", "crossbeam 0.8.4", "crypto-bigint 0.5.5", - "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum.git?branch=main)", + "cs_derive", "derivative", "ethereum-types", "firestorm", @@ -753,12 +714,12 @@ dependencies = [ "lazy_static", "num-modular", "num_cpus", - "pairing_ce 0.28.5 (git+https://github.com/matter-labs/pairing.git)", + "pairing_ce", "rand 
0.8.5", "rayon", "serde", "sha2 0.10.8", - "sha3 0.10.6", + "sha3_ce", "smallvec", "tracing", "unroll", @@ -767,13 +728,14 @@ dependencies = [ [[package]] name = "boojum-cuda" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum-cuda?branch=main#edf04233ea0edb6febe2f7b8cb2c8607ebf8ec96" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04e402ed72733b016d29100aa5b500d5cbcf5eaa2b6805aaba1971a355d202c9" dependencies = [ "boojum", "cmake", - "cudart", - "cudart-sys", - "itertools 0.12.1", + "era_cudart", + "era_cudart_sys", + "itertools 0.13.0", "lazy_static", ] @@ -794,34 +756,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0" dependencies = [ "once_cell", - "proc-macro-crate 2.0.2", + "proc-macro-crate 2.0.0", "proc-macro2 1.0.85", "quote 1.0.36", "syn 2.0.66", "syn_derive", ] -[[package]] -name = "brotli" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "4.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", -] - [[package]] name = "bumpalo" version = "3.16.0" @@ -927,6 +868,12 @@ dependencies = [ "once_cell", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -954,30 +901,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" -[[package]] -name = "chacha20" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" -dependencies = [ - "cfg-if 1.0.0", - "cipher", - "cpufeatures", -] - -[[package]] -name = "chacha20poly1305" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" -dependencies = [ - "aead", - "chacha20", - "cipher", - "poly1305", - "zeroize", -] - [[package]] name = "chrono" version = "0.4.38" @@ -993,37 +916,28 @@ dependencies = [ "windows-targets 0.52.5", ] -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", - "zeroize", -] - [[package]] name = "circuit_definitions" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness?branch=gpu-wrapper#ea0d54f6d5d7d3302a4a6594150a2ca809e6677b" +version = "0.140.0-gpu-wrapper.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d32d2d377f12c125322717d06701e466eb0389400ba68209c90545fee6408677" dependencies = [ "crossbeam 0.8.4", "derivative", "seq-macro", "serde", "snark_wrapper", - "zk_evm 1.4.0", - "zkevm_circuits 1.4.0 (git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=main)", + "zk_evm 0.140.0", + "zkevm_circuits 
0.140.0", ] [[package]] name = "circuit_definitions" -version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#5d6a06c37f4656d26a4170d22f2298cd7716c070" +version = "0.150.2-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45eda61fb4b476ceac2dad7aaf85ba4ed02fb834598dd7aafacebe405f2af612" dependencies = [ - "circuit_encodings 0.1.50", + "circuit_encodings 0.150.2-rc.1", "crossbeam 0.8.4", "derivative", "seq-macro", @@ -1033,119 +947,120 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.1.40" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#39665dffd576cff5007c80dd0e1b5334e230bd3b" +version = "0.140.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7f1168c8fbb45fc7704c1bcdbb65ebdcb019fc9bf1101a475904eff835632f7" dependencies = [ "derivative", "serde", - "zk_evm 1.4.0", - "zkevm_circuits 1.4.0 (git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.4.0)", + "zk_evm 0.140.0", + "zkevm_circuits 0.140.0", ] [[package]] name = "circuit_encodings" -version = "0.1.41" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#f7bd71fd4216e2c51ab7b09a95909fe48c75f35b" +version = "0.141.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90b17a11dd3489daef314cbb07e1098e8e34a35a625fdca421b0012f4bb6cbd0" dependencies = [ "derivative", "serde", - "zk_evm 1.4.1", - "zkevm_circuits 1.4.1", + "zk_evm 0.141.0", + "zkevm_circuits 0.141.0", ] [[package]] name = "circuit_encodings" -version = "0.1.42" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.2#3149a162a729581005fbad6dbcef027a3ee1b214" +version = "0.142.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5df3af2244275a1270e2887b2f47625ec78dff14db8dd8a88f7ea1ea0781e48b" dependencies = [ "derivative", "serde", - "zk_evm 1.4.1", - "zkevm_circuits 1.4.1", + "zk_evm 0.141.0", + "zkevm_circuits 0.141.0", ] [[package]] name = "circuit_encodings" -version = "0.1.50" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#5d6a06c37f4656d26a4170d22f2298cd7716c070" +version = "0.150.2-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4b69893ec5a2112430adaf8e29b52ea9ec4ef2d6663879f7cc279b4479a8880" dependencies = [ "derivative", "serde", - "zk_evm 1.5.0", - "zkevm_circuits 1.5.0", + "zk_evm 0.150.0", + "zkevm_circuits 0.150.0", ] [[package]] name = "circuit_sequencer_api" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.3.3#aba8f2a32767b79838aca7d7d00d9d23144df32f" +version = "0.133.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a87dc7bee6630d4954ac7982eb77e2007476662250cf18e5c460bbc5ee435f1" dependencies = [ - "bellman_ce 0.3.2 (git+https://github.com/matter-labs/bellman?branch=dev)", + "bellman_ce 0.7.0", "derivative", "rayon", "serde", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)", + "zk_evm 0.133.0", ] [[package]] name = "circuit_sequencer_api" -version = "0.1.40" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#39665dffd576cff5007c80dd0e1b5334e230bd3b" +version = "0.140.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b5138e6524c73e6d49fc1d0822b26e62a8d78b2c07e4e1c56061a447c10bec0" 
dependencies = [ - "bellman_ce 0.3.2 (git+https://github.com/matter-labs/bellman?branch=dev)", - "circuit_encodings 0.1.40", + "bellman_ce 0.7.0", + "circuit_encodings 0.140.0", "derivative", "rayon", "serde", - "zk_evm 1.4.0", + "zk_evm 0.140.0", ] [[package]] name = "circuit_sequencer_api" -version = "0.1.41" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#f7bd71fd4216e2c51ab7b09a95909fe48c75f35b" +version = "0.141.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff871d625d002eb7f27394a239c0b19d8449adf1b9ca7805ebb43c8cf0810b51" dependencies = [ - "bellman_ce 0.3.2 (git+https://github.com/matter-labs/bellman?branch=dev)", - "circuit_encodings 0.1.41", + "bellman_ce 0.7.0", + "circuit_encodings 0.141.0", "derivative", "rayon", "serde", - "zk_evm 1.4.1", + "zk_evm 0.141.0", ] [[package]] name = "circuit_sequencer_api" -version = "0.1.42" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.2#3149a162a729581005fbad6dbcef027a3ee1b214" +version = "0.142.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1d861a7a9b8df9389c63092985fc993c46954771da86462d7cab8cbf55a6497" dependencies = [ - "bellman_ce 0.3.2 (git+https://github.com/matter-labs/bellman?branch=dev)", - "circuit_encodings 0.1.42", + "bellman_ce 0.7.0", + "circuit_encodings 0.142.0", "derivative", "rayon", "serde", - "zk_evm 1.4.1", + "zk_evm 0.141.0", ] [[package]] name = "circuit_sequencer_api" -version = "0.1.50" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#5d6a06c37f4656d26a4170d22f2298cd7716c070" +version = "0.150.2-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121470724079938b8f878e8a95f757d814624795c9a5ca69dd9dd782035fbe39" dependencies = [ - "bellman_ce 0.3.2 (git+https://github.com/matter-labs/bellman?branch=dev)", - "circuit_encodings 0.1.50", + "bellman_ce 0.7.0", + "circuit_encodings 0.150.2-rc.1", "derivative", "rayon", "serde", ] -[[package]] -name = "circuit_testing" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-circuit_testing.git?branch=main#164c0adac85be39ee44bd9456b2b91cdede5af80" -dependencies = [ - "bellman_ce 0.3.2 (git+https://github.com/matter-labs/bellman?branch=dev)", -] - [[package]] name = "clang-sys" version = "1.8.1" @@ -1221,22 +1136,6 @@ dependencies = [ "cc", ] -[[package]] -name = "codegen" -version = "0.1.0" -source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#82f96b7156551087f1c9bfe4f0ea68845b6debfc" -dependencies = [ - "ethereum-types", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", - "handlebars", - "hex", - "paste", - "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon.git)", - "serde", - "serde_derive", - "serde_json", -] - [[package]] name = "codegen" version = "0.2.0" @@ -1278,15 +1177,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bed69047ed42e52c7e38d6421eeb8ceefb4f2a2b52eed59137f7bad7908f6800" -[[package]] -name = "concurrent-queue" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" -dependencies = [ - "crossbeam-utils 0.8.20", -] - [[package]] name = "console" version = "0.15.8" @@ -1561,7 +1451,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core 0.6.4", "typenum", ] @@ -1577,36 +1466,16 @@ dependencies = [ [[package]] name = "cs_derive" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#4bcb11f0610302110ae8109af01d5b652191b2f6" -dependencies = [ - "proc-macro-error", - "proc-macro2 1.0.85", - "quote 1.0.36", - "syn 1.0.109", -] - -[[package]] -name = "cs_derive" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#ed8ab8984cae05d00d9d62196753c8d40df47c7d" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faa0b8f9fdb5c91dcd5569cc7cbc11f514fd784a34988ead8455db0db2cfc1c7" dependencies = [ "proc-macro-error", "proc-macro2 1.0.85", "quote 1.0.36", - "serde", "syn 1.0.109", ] -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher", -] - [[package]] name = "ctrlc" version = "3.4.4" @@ -1617,55 +1486,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "cudart" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-cuda?branch=main#3ef61d56b84c1f877fe8aab6ec2b1d14a96cd671" -dependencies = [ - "bitflags 2.5.0", - "cudart-sys", - "paste", -] - -[[package]] -name = "cudart-sys" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-cuda?branch=main#3ef61d56b84c1f877fe8aab6ec2b1d14a96cd671" -dependencies = [ - "bindgen 0.69.4", - "serde_json", -] - -[[package]] -name = "curl" -version = "0.4.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2161dd6eba090ff1594084e95fd67aeccf04382ffea77999ea94ed42ec67b6" -dependencies = [ - "curl-sys", - "libc", - "openssl-probe", - "openssl-sys", - "schannel", - "socket2", - "windows-sys 0.52.0", -] - -[[package]] -name = "curl-sys" -version = "0.4.72+curl-8.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29cbdc8314c447d11e8fd156dcdd031d9e02a7a976163e396b548c03153bc9ea" -dependencies = [ - "cc", - "libc", - "libz-sys", - "openssl-sys", - "pkg-config", - "vcpkg", - "windows-sys 0.52.0", -] - [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -1866,6 +1686,12 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" +[[package]] +name = "dunce" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" + [[package]] name = "ecdsa" version = "0.14.8" @@ -2009,7 +1835,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" dependencies = [ "log", - "regex", ] [[package]] @@ -2034,7 +1859,6 @@ dependencies = [ "anstream", "anstyle", "env_filter", - "humantime", "log", ] @@ -2053,6 +1877,27 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "era_cudart" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1725b17e5e41b89f566ace3900f119fdc87f04e2daa8e253b668573ad67a454f" +dependencies = [ + "bitflags 2.5.0", + 
"era_cudart_sys", + "paste", +] + +[[package]] +name = "era_cudart_sys" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60d46683f8a9a5364874f95b00073f6dc93d33e9a019f150b0d6ce09ffc13251" +dependencies = [ + "bindgen 0.69.4", + "serde_json", +] + [[package]] name = "errno" version = "0.3.9" @@ -2133,27 +1978,6 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" -[[package]] -name = "event-listener" -version = "5.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" -dependencies = [ - "event-listener 5.3.1", - "pin-project-lite", -] - [[package]] name = "fastrand" version = "2.1.0" @@ -2268,7 +2092,7 @@ checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" dependencies = [ "futures-core", "futures-sink", - "spin 0.9.8", + "spin", ] [[package]] @@ -2303,41 +2127,12 @@ dependencies = [ [[package]] name = "franklin-crypto" -version = "0.0.5" -source = "git+https://github.com/matter-labs/franklin-crypto?branch=dev#5695d07c7bc604c2c39a27712ffac171d39ee1ed" -dependencies = [ - "arr_macro", - "bellman_ce 0.3.2 (git+https://github.com/matter-labs/bellman?branch=dev)", - "bit-vec", - "blake2 0.9.2", - "blake2-rfc_bellman_edition", - "blake2s_simd", - "byteorder", - "digest 0.9.0", - "hex", - "indexmap 1.9.3", - "itertools 0.10.5", - "lazy_static", - "num-bigint 0.4.5", - "num-derive 0.2.5", - "num-integer", - "num-traits", - "rand 0.4.6", - "serde", - "sha2 0.9.9", - "sha3 0.9.1", - "smallvec", - "splitmut", - "tiny-keccak 1.5.0", -] - -[[package]] -name = "franklin-crypto" -version = "0.0.5" -source = "git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper#2546c63b91b59bdb0ad342d26f03fb57477550b2" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77d90323407438ad4fc3385f2dc78f5e92aa4d67a03a08a8562396d68a07f96b" dependencies = [ "arr_macro", - "bellman_ce 0.3.2 (git+https://github.com/matter-labs/bellman?branch=snark-wrapper)", + "bellman_ce 0.8.0", "bit-vec", "blake2 0.9.2", "blake2-rfc_bellman_edition", @@ -2351,7 +2146,7 @@ dependencies = [ "itertools 0.10.5", "lazy_static", "num-bigint 0.4.5", - "num-derive 0.2.5", + "num-derive", "num-integer", "num-traits", "rand 0.4.6", @@ -2363,6 +2158,12 @@ dependencies = [ "tiny-keccak 1.5.0", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "fuchsia-cprng" version = "0.1.1" @@ -2524,20 +2325,10 @@ dependencies = [ "cfg-if 1.0.0", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] -[[package]] -name = "ghash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" -dependencies = [ - "opaque-debug", - "polyval", -] - [[package]] name = "gimli" version = "0.29.0" @@ -2560,7 +2351,7 @@ dependencies = [ "futures-core", 
"futures-sink", "gloo-utils", - "http", + "http 0.2.12", "js-sys", "pin-project", "serde", @@ -2598,9 +2389,9 @@ dependencies = [ [[package]] name = "google-cloud-auth" -version = "0.13.2" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bf7cb7864f08a92e77c26bb230d021ea57691788fb5dd51793f96965d19e7f9" +checksum = "1112c453c2e155b3e683204ffff52bcc6d6495d04b68d9e90cd24161270c5058" dependencies = [ "async-trait", "base64 0.21.7", @@ -2608,7 +2399,7 @@ dependencies = [ "google-cloud-token", "home", "jsonwebtoken", - "reqwest", + "reqwest 0.12.5", "serde", "serde_json", "thiserror", @@ -2620,21 +2411,22 @@ dependencies = [ [[package]] name = "google-cloud-metadata" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc279bfb50487d7bcd900e8688406475fc750fe474a835b2ab9ade9eb1fc90e2" +checksum = "04f945a208886a13d07636f38fb978da371d0abc3e34bad338124b9f8c135a8f" dependencies = [ - "reqwest", + "reqwest 0.12.5", "thiserror", "tokio", ] [[package]] name = "google-cloud-storage" -version = "0.15.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac04b29849ebdeb9fb008988cc1c4d1f0c9d121b4c7f1ddeb8061df124580e93" +checksum = "cc0c5b7469142d91bd77959e69375bede324a5def07c7f29aa0d582586cba305" dependencies = [ + "anyhow", "async-stream", "async-trait", "base64 0.21.7", @@ -2648,7 +2440,8 @@ dependencies = [ "percent-encoding", "pkcs8 0.10.2", "regex", - "reqwest", + "reqwest 0.12.5", + "reqwest-middleware", "ring", "serde", "serde_json", @@ -2669,52 +2462,6 @@ dependencies = [ "async-trait", ] -[[package]] -name = "governor" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87" -dependencies = [ - "dashmap", - "futures 0.3.30", - "futures-timer", - "no-std-compat", - "nonzero_ext", - "parking_lot", - "quanta 0.9.3", - "rand 0.8.5", - "smallvec", -] - -[[package]] -name = "gpu-ffi" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?rev=3d33e06#3d33e069d9d263f3a9626d235ac6dc6c49179965" -dependencies = [ - "bindgen 0.59.2", - "crossbeam 0.8.4", - "derivative", - "futures 0.3.30", - "futures-locks", - "num_cpus", -] - -[[package]] -name = "gpu-prover" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?rev=3d33e06#3d33e069d9d263f3a9626d235ac6dc6c49179965" -dependencies = [ - "bit-vec", - "cfg-if 1.0.0", - "crossbeam 0.8.4", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper)", - "gpu-ffi", - "itertools 0.13.0", - "num_cpus", - "rand 0.4.6", - "serde", -] - [[package]] name = "group" version = "0.12.1" @@ -2748,7 +2495,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.12", "indexmap 2.2.6", "slab", "tokio", @@ -2757,17 +2504,22 @@ dependencies = [ ] [[package]] -name = "handlebars" -version = "5.1.2" +name = "h2" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d08485b96a0e6393e9e4d1b8d48cf74ad6c063cd905eb33f42c1ce3f0377539b" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" dependencies = [ - "log", - "pest", - "pest_derive", - "serde", - "serde_json", - "thiserror", + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.1.0", + "indexmap 2.2.6", + "slab", + "tokio", + 
"tokio-util", + "tracing", ] [[package]] @@ -2779,15 +2531,6 @@ dependencies = [ "ahash 0.7.8", ] -[[package]] -name = "hashbrown" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" -dependencies = [ - "ahash 0.8.11", -] - [[package]] name = "hashbrown" version = "0.14.5" @@ -2807,16 +2550,6 @@ dependencies = [ "hashbrown 0.14.5", ] -[[package]] -name = "hdrhistogram" -version = "7.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" -dependencies = [ - "byteorder", - "num-traits", -] - [[package]] name = "heck" version = "0.3.3" @@ -2911,6 +2644,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.6" @@ -2918,15 +2662,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.12", "pin-project-lite", ] [[package]] -name = "http-range-header" -version = "0.3.1" +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "pin-project-lite", +] [[package]] name = "httparse" @@ -2956,9 +2717,9 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", "httparse", "httpdate", "itoa", @@ -2970,20 +2731,42 @@ dependencies = [ "want", ] +[[package]] +name = "hyper" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.0", + "httparse", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + [[package]] name = "hyper-rustls" -version = "0.24.2" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", - "http", - "hyper", + "http 1.1.0", + "hyper 1.3.1", + "hyper-util", "log", - "rustls 0.21.12", - "rustls-native-certs 0.6.3", + "rustls", + "rustls-pki-types", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", + "tower-service", ] [[package]] @@ -2992,7 +2775,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper", + "hyper 0.14.29", "pin-project-lite", "tokio", 
"tokio-io-timeout", @@ -3005,10 +2788,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper", + "hyper 0.14.29", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.3.1", + "hyper-util", "native-tls", "tokio", "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "hyper 1.3.1", + "pin-project-lite", + "socket2", + "tokio", + "tower", + "tower-service", + "tracing", ] [[package]] @@ -3050,20 +2869,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "im" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0acd33ff0285af998aaf9b57342af478078f53492322fafc47450e09397e0e9" -dependencies = [ - "bitmaps", - "rand_core 0.6.4", - "rand_xoshiro", - "sized-chunks", - "typenum", - "version_check", -] - [[package]] name = "impl-codec" version = "0.6.0" @@ -3134,15 +2939,6 @@ dependencies = [ "regex", ] -[[package]] -name = "inout" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" -dependencies = [ - "generic-array", -] - [[package]] name = "ipnet" version = "2.9.0" @@ -3158,16 +2954,6 @@ dependencies = [ "serde", ] -[[package]] -name = "iri-string" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f5f6c2df22c009ac44f6f1499308e7a3ac7ba42cd2378475cc691510e1eef1b" -dependencies = [ - "memchr", - "serde", -] - [[package]] name = "is_terminal_polyfill" version = "1.70.0" @@ -3227,6 +3013,26 @@ dependencies = [ "libc", ] +[[package]] +name = "jni" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec" +dependencies = [ + "cesu8", + "combine", + "jni-sys", + "log", + "thiserror", + "walkdir", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.31" @@ -3247,63 +3053,62 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9579d0ca9fb30da026bac2f0f7d9576ec93489aeb7cd4971dd5b4617d82c79b2" +checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-http-client", "jsonrpsee-proc-macros", - "jsonrpsee-server", "jsonrpsee-types", "jsonrpsee-wasm-client", "jsonrpsee-ws-client", - "tokio", "tracing", ] [[package]] name = "jsonrpsee-client-transport" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3f9f9ed46590a8d5681975f126e22531698211b926129a40a2db47cbca429220" +checksum = "08163edd8bcc466c33d79e10f695cdc98c00d1e6ddfb95cec41b6b0279dd5432" dependencies = [ + "base64 0.22.1", "futures-channel", "futures-util", "gloo-net", - "http", + "http 1.1.0", "jsonrpsee-core", "pin-project", - "rustls-native-certs 0.7.0", + "rustls", "rustls-pki-types", + "rustls-platform-verifier", "soketto", "thiserror", "tokio", - "tokio-rustls 0.25.0", + "tokio-rustls", "tokio-util", "tracing", "url", - "webpki-roots", ] [[package]] name = "jsonrpsee-core" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "776d009e2f591b78c038e0d053a796f94575d66ca4e77dd84bfc5e81419e436c" +checksum = "79712302e737d23ca0daa178e752c9334846b08321d439fd89af9a384f8c830b" dependencies = [ "anyhow", - "async-lock", "async-trait", "beef", + "bytes", "futures-timer", "futures-util", - "hyper", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", "jsonrpsee-types", - "parking_lot", "pin-project", - "rand 0.8.5", "rustc-hash", "serde", "serde_json", @@ -3316,15 +3121,20 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b7de9f3219d95985eb77fd03194d7c1b56c19bce1abfcc9d07462574b15572" +checksum = "2d90064e04fb9d7282b1c71044ea94d0bbc6eff5621c66f1a0bce9e9de7cf3ac" dependencies = [ "async-trait", - "hyper", + "base64 0.22.1", + "http-body 1.0.0", + "hyper 1.3.1", "hyper-rustls", + "hyper-util", "jsonrpsee-core", "jsonrpsee-types", + "rustls", + "rustls-platform-verifier", "serde", "serde_json", "thiserror", @@ -3336,49 +3146,25 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d94b7505034e2737e688e1153bf81e6f93ad296695c43958d6da2e4321f0a990" +checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ - "heck 0.4.1", - "proc-macro-crate 2.0.2", + "heck 0.5.0", + "proc-macro-crate 3.1.0", "proc-macro2 1.0.85", "quote 1.0.36", - "syn 1.0.109", -] - -[[package]] -name = "jsonrpsee-server" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc7c6d1a2c58f6135810284a390d9f823d0f508db74cd914d8237802de80f98" -dependencies = [ - "futures-util", - "http", - "hyper", - "jsonrpsee-core", - "jsonrpsee-types", - "pin-project", - "route-recognizer", - "serde", - "serde_json", - "soketto", - "thiserror", - "tokio", - "tokio-stream", - "tokio-util", - "tower", - "tracing", + "syn 2.0.66", ] [[package]] name = "jsonrpsee-types" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3266dfb045c9174b24c77c2dfe0084914bb23a6b2597d70c9dc6018392e1cd1b" +checksum = "d9c465fbe385238e861fdc4d1c85e04ada6c1fd246161d26385c1b311724d2af" dependencies = [ - "anyhow", "beef", + "http 1.1.0", "serde", "serde_json", "thiserror", @@ -3386,9 +3172,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30f36d27503d0efc0355c1630b74ecfb367050847bf7241a0ed75fab6dfa96c0" +checksum = "4727ac037f834c6f04c0912cada7532dbddb54e92fbc64e33d6cb8c24af313c9" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -3397,11 +3183,11 @@ dependencies = [ [[package]] name = 
"jsonrpsee-ws-client" -version = "0.21.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "073c077471e89c4b511fa88b3df9a0f0abdf4a0a2e6683dd2ab36893af87bb2d" +checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" dependencies = [ - "http", + "http 1.1.0", "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-types", @@ -3458,29 +3244,13 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "kzg" -version = "0.1.50" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#5d6a06c37f4656d26a4170d22f2298cd7716c070" -dependencies = [ - "boojum", - "derivative", - "hex", - "once_cell", - "rayon", - "serde", - "serde_json", - "serde_with", - "zkevm_circuits 1.5.0", -] - [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin 0.5.2", + "spin", ] [[package]] @@ -3551,7 +3321,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" dependencies = [ "cc", - "libc", "pkg-config", "vcpkg", ] @@ -3642,12 +3411,6 @@ dependencies = [ "logos-codegen", ] -[[package]] -name = "lru" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" - [[package]] name = "lz4-sys" version = "1.9.4" @@ -3658,24 +3421,6 @@ dependencies = [ "libc", ] -[[package]] -name = "mach" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" -dependencies = [ - "libc", -] - -[[package]] -name = "mach2" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" -dependencies = [ - "libc", -] - [[package]] name = "match_cfg" version = "0.1.0" @@ -3734,61 +3479,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "metrics" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" -dependencies = [ - "ahash 0.8.11", - "metrics-macros", - "portable-atomic", -] - -[[package]] -name = "metrics-exporter-prometheus" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d4fa7ce7c4862db464a37b0b31d89bca874562f034bd7993895572783d02950" -dependencies = [ - "base64 0.21.7", - "hyper", - "indexmap 1.9.3", - "ipnet", - "metrics", - "metrics-util", - "quanta 0.11.1", - "thiserror", - "tokio", - "tracing", -] - -[[package]] -name = "metrics-macros" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" -dependencies = [ - "proc-macro2 1.0.85", - "quote 1.0.36", - "syn 2.0.66", -] - -[[package]] -name = "metrics-util" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" -dependencies = [ - "crossbeam-epoch 0.9.18", - "crossbeam-utils 0.8.20", - "hashbrown 0.13.1", - "metrics", - "num_cpus", - "quanta 0.11.1", 
- "sketches-ddsketch", -] - [[package]] name = "miette" version = "5.10.0" @@ -3865,47 +3555,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + [[package]] name = "multimap" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" -[[package]] -name = "multivm" -version = "0.1.0" -dependencies = [ - "anyhow", - "circuit_sequencer_api 0.1.0", - "circuit_sequencer_api 0.1.40", - "circuit_sequencer_api 0.1.41", - "circuit_sequencer_api 0.1.42", - "circuit_sequencer_api 0.1.50", - "hex", - "itertools 0.10.5", - "once_cell", - "pretty_assertions", - "serde", - "thiserror", - "tracing", - "vise", - "vm2", - "zk_evm 1.3.1", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zk_evm 1.4.0", - "zk_evm 1.4.1", - "zk_evm 1.5.0", - "zksync_contracts", - "zksync_state", - "zksync_system_constants", - "zksync_types", - "zksync_utils", -] - [[package]] name = "native-tls" version = "0.2.12" @@ -3960,12 +3625,6 @@ dependencies = [ "libc", ] -[[package]] -name = "no-std-compat" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" - [[package]] name = "nodrop" version = "0.1.14" @@ -3982,12 +3641,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "nonzero_ext" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" - [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -4078,17 +3731,6 @@ dependencies = [ "syn 0.15.44", ] -[[package]] -name = "num-derive" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" -dependencies = [ - "proc-macro2 1.0.85", - "quote 1.0.36", - "syn 1.0.109", -] - [[package]] name = "num-integer" version = "0.1.46" @@ -4187,7 +3829,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 2.0.2", + "proc-macro-crate 3.1.0", "proc-macro2 1.0.85", "quote 1.0.36", "syn 2.0.66", @@ -4282,9 +3924,9 @@ checksum = "c7594ec0e11d8e33faf03530a4c49af7064ebba81c1480e01be67d90b356508b" dependencies = [ "async-trait", "bytes", - "http", + "http 0.2.12", "opentelemetry_api", - "reqwest", + "reqwest 0.11.27", ] [[package]] @@ -4295,14 +3937,14 @@ checksum = "7e5e5a5c4135864099f3faafbe939eb4d7f9b80ebf68a8448da961b32a7c1275" dependencies = [ "async-trait", "futures-core", - "http", + "http 0.2.12", "opentelemetry-http", "opentelemetry-proto", "opentelemetry-semantic-conventions", "opentelemetry_api", "opentelemetry_sdk", "prost 0.11.9", - "reqwest", + "reqwest 0.11.27", "thiserror", "tokio", "tonic", @@ -4417,33 +4059,9 @@ dependencies = [ [[package]] name = "pairing_ce" -version = "0.28.5" +version = "0.28.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "db007b21259660d025918e653508f03050bf23fb96a88601f9936329faadc597" -dependencies = [ - "byteorder", - "cfg-if 1.0.0", - "ff_ce", - "rand 0.4.6", - "serde", -] - -[[package]] -name = "pairing_ce" -version = "0.28.5" -source = "git+https://github.com/matter-labs/pairing.git?rev=d24f2c5871089c4cd4f54c0ca266bb9fef6115eb#d24f2c5871089c4cd4f54c0ca266bb9fef6115eb" -dependencies = [ - "byteorder", - "cfg-if 1.0.0", - "ff_ce", - "rand 0.4.6", - "serde", -] - -[[package]] -name = "pairing_ce" -version = "0.28.5" -source = "git+https://github.com/matter-labs/pairing.git#d24f2c5871089c4cd4f54c0ca266bb9fef6115eb" +checksum = "843b5b6fb63f00460f611dbc87a50bbbb745f0dfe5cbf67ca89299c79098640e" dependencies = [ "byteorder", "cfg-if 1.0.0", @@ -4472,18 +4090,12 @@ version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 2.0.2", + "proc-macro-crate 2.0.0", "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] -[[package]] -name = "parking" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" - [[package]] name = "parking_lot" version = "0.12.3" @@ -4544,51 +4156,6 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" -[[package]] -name = "pest" -version = "2.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" -dependencies = [ - "memchr", - "thiserror", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2 1.0.85", - "quote 1.0.36", - "syn 2.0.66", -] - -[[package]] -name = "pest_meta" -version = "2.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" -dependencies = [ - "once_cell", - "pest", - "sha2 0.10.8", -] - [[package]] name = "petgraph" version = "0.6.5" @@ -4668,35 +4235,6 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" -[[package]] -name = "poly1305" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" -dependencies = [ - "cpufeatures", - "opaque-debug", - "universal-hash", -] - -[[package]] -name = "polyval" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "opaque-debug", - "universal-hash", -] - -[[package]] -name = "portable-atomic" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" - [[package]] name = "powerfmt" version = "0.2.0" @@ -4763,14 +4301,22 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" dependencies = [ - "toml_datetime", "toml_edit 0.20.2", ] +[[package]] +name = "proc-macro-crate" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +dependencies = [ + "toml_edit 0.21.1", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -4842,18 +4388,6 @@ dependencies = [ "syn 2.0.66", ] -[[package]] -name = "prometheus_exporter" -version = "0.1.0" -dependencies = [ - "anyhow", - "metrics", - "metrics-exporter-prometheus", - "tokio", - "vise", - "vise-exporter", -] - [[package]] name = "proptest" version = "1.4.0" @@ -4999,19 +4533,19 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", - "circuit_definitions 1.5.0", + "chrono", + "circuit_definitions 0.150.2-rc.1", "clap 4.5.4", "colored", "dialoguer", "hex", - "prover_dal", "serde_json", "sqlx", "strum", "tokio", "tracing", "tracing-subscriber", - "zkevm_test_harness 1.5.0", + "zkevm_test_harness 0.150.2-rc.1", "zksync_basic_types", "zksync_config", "zksync_contracts", @@ -5019,22 +4553,13 @@ dependencies = [ "zksync_db_connection", "zksync_env_config", "zksync_eth_client", + "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_interface", "zksync_types", "zksync_utils", ] -[[package]] -name = "prover_dal" -version = "0.1.0" -dependencies = [ - "sqlx", - "strum", - "zksync_basic_types", - "zksync_db_connection", -] - [[package]] name = "prover_version" version = "0.1.0" @@ -5074,54 +4599,16 @@ dependencies = [ ] [[package]] -name = "quanta" -version = "0.9.3" +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quick-protobuf" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" -dependencies = [ - "crossbeam-utils 0.8.20", - "libc", - "mach", - "once_cell", - "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", - "web-sys", - "winapi", -] - -[[package]] -name = "quanta" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" -dependencies = [ - "crossbeam-utils 0.8.20", - "libc", - "mach2", - "once_cell", - "raw-cpuid", - "wasi 0.11.0+wasi-snapshot-preview1", - "web-sys", - "winapi", -] - -[[package]] -name = "queues" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1475abae4f8ad4998590fe3acfe20104f0a5d48fc420c817cd2c09c3f56151f0" - -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - -[[package]] -name = "quick-protobuf" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" 
+checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" dependencies = [ "byteorder", ] @@ -5217,24 +4704,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "rand_xoshiro" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" -dependencies = [ - "rand_core 0.6.4", -] - -[[package]] -name = "raw-cpuid" -version = "10.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "rayon" version = "1.10.0" @@ -5346,16 +4815,15 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-tls", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.29", + "hyper-tls 0.5.0", "ipnet", "js-sys", "log", "mime", - "mime_guess", "native-tls", "once_cell", "percent-encoding", @@ -5364,7 +4832,52 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", + "system-configuration", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg 0.50.0", +] + +[[package]] +name = "reqwest" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +dependencies = [ + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-rustls", + "hyper-tls 0.6.0", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "mime_guess", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile 2.1.2", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", "system-configuration", "tokio", "tokio-native-tls", @@ -5375,43 +4888,38 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "winreg", + "winreg 0.52.0", ] [[package]] -name = "rescue_poseidon" -version = "0.4.1" -source = "git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2#126937ef0e7a281f1ff9f512ac41a746a691a342" +name = "reqwest-middleware" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39346a33ddfe6be00cbc17a34ce996818b97b230b87229f10114693becca1268" dependencies = [ - "addchain", - "arrayvec 0.7.4", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder", - "derivative", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper)", - "lazy_static", - "log", - "num-bigint 0.3.3", - "num-integer", - "num-iter", - "num-traits", - "rand 0.4.6", + "anyhow", + "async-trait", + "http 1.1.0", + "reqwest 0.12.5", "serde", - "sha3 0.9.1", - "smallvec", - "typemap_rev", + "thiserror", + "tower-service", ] [[package]] name = "rescue_poseidon" -version = "0.4.1" -source = "git+https://github.com/matter-labs/rescue-poseidon.git#d059b5042df5ed80e151f05751410b524a54d16c" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50e3a9a33bb7d2a469247e4f5fc47f7ab87807cd603739d306fa84e06ca0a160" dependencies = [ "addchain", "arrayvec 0.7.4", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", 
+ "blake2 0.10.6", "byteorder", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", + "derivative", + "franklin-crypto", + "lazy_static", + "log", "num-bigint 0.3.3", "num-integer", "num-iter", @@ -5420,6 +4928,7 @@ dependencies = [ "serde", "sha3 0.9.1", "smallvec", + "typemap_rev", ] [[package]] @@ -5453,7 +4962,7 @@ dependencies = [ "cfg-if 1.0.0", "getrandom", "libc", - "spin 0.9.8", + "spin", "untrusted", "windows-sys 0.52.0", ] @@ -5507,12 +5016,6 @@ dependencies = [ "librocksdb-sys", ] -[[package]] -name = "route-recognizer" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" - [[package]] name = "rsa" version = "0.9.6" @@ -5591,42 +5094,20 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", -] - -[[package]] -name = "rustls" -version = "0.22.4" +version = "0.23.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" dependencies = [ + "aws-lc-rs", "log", + "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki", "subtle", "zeroize", ] -[[package]] -name = "rustls-native-certs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" -dependencies = [ - "openssl-probe", - "rustls-pemfile 1.0.4", - "schannel", - "security-framework", -] - [[package]] name = "rustls-native-certs" version = "0.7.0" @@ -5666,21 +5147,39 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] -name = "rustls-webpki" -version = "0.101.7" +name = "rustls-platform-verifier" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "3e3beb939bcd33c269f4bf946cc829fcd336370267c4a927ac0399c84a3151a1" dependencies = [ - "ring", - "untrusted", + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-roots", + "winapi", ] +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" + [[package]] name = "rustls-webpki" version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -5734,16 +5233,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "seahash" version = "4.1.0" @@ -5815,6 +5304,7 @@ dependencies = [ "core-foundation", "core-foundation-sys", "libc", + "num-bigint 0.4.5", "security-framework-sys", ] @@ -5851,7 +5341,7 @@ checksum = "6ce4b57f1b521f674df7a1d200be8ff5d74e3712020ee25b553146657b5377d5" dependencies = [ "httpdate", "native-tls", - "reqwest", + "reqwest 0.11.27", "sentry-backtrace", "sentry-contexts", "sentry-core", @@ -5998,16 +5488,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_path_to_error" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" -dependencies = [ - "itoa", - "serde", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -6057,19 +5537,6 @@ dependencies = [ "unsafe-libyaml", ] -[[package]] -name = "sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha1" version = "0.10.6" @@ -6096,8 +5563,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" -source = "git+https://github.com/RustCrypto/hashes.git?rev=1731ced4a116d61ba9dc6ee6d0f38fb8102e357a#1731ced4a116d61ba9dc6ee6d0f38fb8102e357a" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -6105,10 +5573,10 @@ dependencies = [ ] [[package]] -name = "sha2" -version = "0.10.8" +name = "sha2_ce" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "eca2daa77078f4ddff27e75c4bf59e4c2697525f56dbb3c842d34a5d1f2b04a2" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -6129,18 +5597,19 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.6" -source = "git+https://github.com/RustCrypto/hashes.git?rev=7a187e934c1f6c68e4b4e5cf37541b7a0d64d303#7a187e934c1f6c68e4b4e5cf37541b7a0d64d303" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ "digest 0.10.7", "keccak", ] [[package]] -name = "sha3" -version = "0.10.8" +name = "sha3_ce" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +checksum = "34c9a08202c50378d8a07a5f458193a5f542d2828ac6640263dbc0c2533ea25e" dependencies = [ "digest 0.10.7", "keccak", @@ -6163,17 +5632,18 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.2.0" -source = "git+https://github.com/matter-labs/era-shivini.git?branch=v1.5.0#e77678baa55bfaf56fe3b29724b50ae21fe92fa2" +version = "0.150.2-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2e391df42e8e145b12d7c446acd0de300ccc964ee941f5b9013ec970811f70f" dependencies = [ "bincode", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2 0.10.6", "boojum", "boojum-cuda", - "circuit_definitions 
1.5.0", - "cudart", - "cudart-sys", + "circuit_definitions 0.150.2-rc.1", "derivative", + "era_cudart", + "era_cudart_sys", "hex", "rand 0.8.5", "serde", @@ -6234,16 +5704,6 @@ dependencies = [ "time", ] -[[package]] -name = "sized-chunks" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d69225bde7a69b235da73377861095455d298f2b970996eec25ddbb42b3d1e" -dependencies = [ - "bitmaps", - "typenum", -] - [[package]] name = "skeptic" version = "0.13.7" @@ -6259,12 +5719,6 @@ dependencies = [ "walkdir", ] -[[package]] -name = "sketches-ddsketch" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" - [[package]] name = "slab" version = "0.4.9" @@ -6286,27 +5740,12 @@ dependencies = [ [[package]] name = "snark_wrapper" version = "0.1.0" -source = "git+https://github.com/matter-labs/snark-wrapper.git?branch=main#76959cadabeec344b9fa1458728400d60340e496" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6e57fa6c50ac36e39c58bf411aa5b9ca2f2a878c3da9769fb12736fc77ee346" dependencies = [ "derivative", "rand 0.4.6", - "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2)", -] - -[[package]] -name = "snow" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" -dependencies = [ - "aes-gcm", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", - "chacha20poly1305", - "curve25519-dalek", - "rand_core 0.6.4", - "rustc_version", - "sha2 0.10.8", - "subtle", + "rescue_poseidon", ] [[package]] @@ -6321,26 +5760,19 @@ dependencies = [ [[package]] name = "soketto" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" +checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" dependencies = [ - "base64 0.13.1", + "base64 0.22.1", "bytes", "futures 0.3.30", - "http", "httparse", "log", "rand 0.8.5", - "sha-1", + "sha1", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" @@ -6415,7 +5847,7 @@ dependencies = [ "crc", "crossbeam-queue 0.3.11", "either", - "event-listener 2.5.3", + "event-listener", "futures-channel", "futures-core", "futures-intrusive", @@ -6733,35 +6165,18 @@ dependencies = [ "syn 2.0.66", ] -[[package]] -name = "sync_vm" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#ed8ab8984cae05d00d9d62196753c8d40df47c7d" -dependencies = [ - "arrayvec 0.7.4", - "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3)", - "derivative", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", - "hex", - "itertools 0.10.5", - "num-bigint 0.4.5", - "num-derive 0.3.3", - "num-integer", - "num-traits", - "once_cell", - "rand 0.4.6", - "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon.git)", - "serde", - "smallvec", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)", -] - [[package]] name = "sync_wrapper" version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "system-configuration" version = "0.5.1" @@ -7002,30 +6417,20 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.21.12", + "rustls", + "rustls-pki-types", "tokio", ] [[package]] -name = "tokio-rustls" -version = "0.25.0" +name = "tokio-stream" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" -dependencies = [ - "rustls 0.22.4", - "rustls-pki-types", - "tokio", -] - -[[package]] -name = "tokio-stream" -version = "0.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", "pin-project-lite", @@ -7048,9 +6453,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" [[package]] name = "toml_edit" @@ -7085,6 +6490,17 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +dependencies = [ + "indexmap 2.2.6", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.9.2" @@ -7097,10 +6513,10 @@ dependencies = [ "bytes", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.29", "hyper-timeout", "percent-encoding", "pin-project", @@ -7121,7 +6537,6 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "hdrhistogram", "indexmap 1.9.3", "pin-project", "pin-project-lite", @@ -7134,36 +6549,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower-http" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" -dependencies = [ - "async-compression", - "base64 0.21.7", - "bitflags 2.5.0", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "httpdate", - "iri-string", - "mime", - "mime_guess", - "percent-encoding", - "pin-project-lite", - "tokio", - "tokio-util", - "tower", - "tower-layer", - "tower-service", - "tracing", - "uuid", -] - [[package]] name = "tower-layer" version = "0.3.2" @@ -7303,12 +6688,6 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" -[[package]] -name 
= "ucd-trie" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" - [[package]] name = "uint" version = "0.9.5" @@ -7402,16 +6781,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - [[package]] name = "unroll" version = "0.1.5" @@ -7477,7 +6846,6 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ - "getrandom", "serde", ] @@ -7508,7 +6876,8 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vise" version = "0.1.0" -source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229baafe01d5177b63c6ee1def80d8e39a2365e64caf69ddb05a57594b15647c" dependencies = [ "compile-fmt", "elsa", @@ -7521,10 +6890,10 @@ dependencies = [ [[package]] name = "vise-exporter" version = "0.1.0" -source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23981b18d697026f5430249ab01ba739ef2edc463e400042394331cb2bb63494" dependencies = [ - "hyper", - "metrics-exporter-prometheus", + "hyper 0.14.29", "once_cell", "tokio", "tracing", @@ -7534,7 +6903,8 @@ dependencies = [ [[package]] name = "vise-macros" version = "0.1.0" -source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb19c33cd5f04dcf4e767635e058a998edbc2b7fca32ade0a4a1cea0f8e9b34" dependencies = [ "proc-macro2 1.0.85", "quote 1.0.36", @@ -7547,7 +6917,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", - "circuit_definitions 1.5.0", + "circuit_definitions 0.150.2-rc.1", "clap 4.5.4", "hex", "indicatif", @@ -7564,35 +6934,19 @@ dependencies = [ "toml_edit 0.14.4", "tracing", "tracing-subscriber", - "vlog", - "zkevm_test_harness 1.5.0", + "zkevm_test_harness 0.150.2-rc.1", "zksync_config", "zksync_env_config", "zksync_prover_fri_types", "zksync_types", "zksync_utils", -] - -[[package]] -name = "vlog" -version = "0.1.0" -dependencies = [ - "chrono", - "opentelemetry", - "opentelemetry-otlp", - "opentelemetry-semantic-conventions", - "sentry", - "serde", - "serde_json", - "tracing", - "tracing-opentelemetry", - "tracing-subscriber", + "zksync_vlog", ] [[package]] name = "vm2" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=4931b489dc9e95f88a0b11bd27b110656277c5d2#4931b489dc9e95f88a0b11bd27b110656277c5d2" +source = "git+https://github.com/matter-labs/vm2.git?rev=2882a1232a695ffc1ec4b796195f7aababeb6ab2#2882a1232a695ffc1ec4b796195f7aababeb6ab2" dependencies = [ "enum_dispatch", "primitive-types", @@ -7600,21 +6954,6 @@ dependencies = [ "zkevm_opcode_defs 1.5.0", ] -[[package]] -name = "vm_utils" 
-version = "0.1.0" -dependencies = [ - "anyhow", - "multivm", - "tokio", - "tracing", - "zksync_contracts", - "zksync_dal", - "zksync_state", - "zksync_types", - "zksync_utils", -] - [[package]] name = "wait-timeout" version = "0.2.0" @@ -7643,12 +6982,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -7980,13 +7313,13 @@ dependencies = [ ] [[package]] -name = "wrapper-prover" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?rev=3d33e06#3d33e069d9d263f3a9626d235ac6dc6c49179965" +name = "winreg" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" dependencies = [ - "circuit_definitions 0.1.0", - "gpu-prover", - "zkevm_test_harness 1.4.0", + "cfg-if 1.0.0", + "windows-sys 0.48.0", ] [[package]] @@ -8046,25 +7379,27 @@ dependencies = [ [[package]] name = "zk_evm" -version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.1-rc2#0a7c775932db4839ff6b7fb0db9bdb3583ab54c0" +version = "0.131.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2b83ee7887fb29fda57c6b26a0f64c9b211459d718f8a26310f962e69f0b764" dependencies = [ - "blake2 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e)", + "blake2_ce", "k256 0.11.6", "lazy_static", "num", "serde", "serde_json", - "sha2 0.10.6", - "sha3 0.10.6", + "sha2_ce", + "sha3_ce", "static_assertions", - "zkevm_opcode_defs 1.3.1", + "zkevm_opcode_defs 0.131.0", ] [[package]] name = "zk_evm" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2#fbee20f5bac7d6ca3e22ae69b2077c510a07de4e" +version = "0.133.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9af08e9284686a1b0c89ec4931eb915ac0729367f1247abd06164874fe738106" dependencies = [ "anyhow", "lazy_static", @@ -8072,14 +7407,15 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.1.0", - "zkevm_opcode_defs 1.3.2", + "zk_evm_abstractions 0.140.0", + "zkevm_opcode_defs 0.132.0", ] [[package]] name = "zk_evm" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3#fbee20f5bac7d6ca3e22ae69b2077c510a07de4e" +version = "0.140.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349bb8320d12578537658792df708f43c52e6330f0df071f812cb93b04ade962" dependencies = [ "anyhow", "lazy_static", @@ -8087,14 +7423,15 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.1.0", - "zkevm_opcode_defs 1.3.2", + "zk_evm_abstractions 0.140.0", + "zkevm_opcode_defs 0.132.0", ] [[package]] name = "zk_evm" -version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.4.0#dd76fc5badf2c05278a21b38015a7798fe2fe358" +version = "0.141.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8886ba5989b952b7b76096469eeb6fdfaf3369770e9e22a6f67dc4b7d65f9243" dependencies = [ "anyhow", "lazy_static", @@ -8102,14 +7439,15 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.1.0", - "zkevm_opcode_defs 1.3.2", + 
"zk_evm_abstractions 0.141.0", + "zkevm_opcode_defs 0.141.0", ] [[package]] name = "zk_evm" -version = "1.4.1" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.4.1#6250dbf64b2d14ced87a127735da559f27a432d5" +version = "0.150.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5bf91304aa14827758afa3def8cf622f9a7f9fb65fe5d5099018dbacf0c5984" dependencies = [ "anyhow", "lazy_static", @@ -8117,46 +7455,46 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 1.4.1", - "zkevm_opcode_defs 1.4.1", + "zk_evm_abstractions 0.150.0", ] [[package]] -name = "zk_evm" -version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.5.0#0c5cdca00cca4fa0a8c49147a11048c24f8a4b12" +name = "zk_evm_abstractions" +version = "0.140.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be696258861eba4e6625a5665084b2266720bb67f4ba69819469700ac5c6a401" dependencies = [ "anyhow", - "lazy_static", - "num", + "num_enum 0.6.1", "serde", - "serde_json", "static_assertions", - "zk_evm_abstractions 1.5.0", + "zkevm_opcode_defs 0.132.0", ] [[package]] name = "zk_evm_abstractions" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#32dd320953841aa78579d9da08abbc70bcaed175" +version = "0.141.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "637a3cb6cb475bb238bee3e450763205d36fe6c92dc1b23300655927915baf03" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 1.3.2", + "zkevm_opcode_defs 0.141.0", ] [[package]] name = "zk_evm_abstractions" -version = "1.4.1" -source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git?branch=v1.4.1#0aac08c3b097ee8147e748475117ac46bddcdcef" +version = "0.150.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc313cea4ac9ef6b855264b1425cbe9de30dd8f009559dabcb6b2896122da5db" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 1.4.1", + "zkevm_opcode_defs 0.150.0", ] [[package]] @@ -8173,8 +7511,9 @@ dependencies = [ [[package]] name = "zkevm-assembly" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=v1.3.2#3c61d450cbe6548068be8f313ed02f1bd229a865" +version = "0.132.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fde7992c5cdb4edac74f6bb9cecfd5150f83eb1a7b5b27eb86aceb2b08b8d8de" dependencies = [ "env_logger 0.9.3", "hex", @@ -8187,13 +7526,14 @@ dependencies = [ "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs 1.3.2", + "zkevm_opcode_defs 0.132.0", ] [[package]] name = "zkevm-assembly" -version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=v1.5.0#48303aa435810adb12e277494e5dae3764313330" +version = "0.150.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d55e7082c5a313e46e1017d12ea5acfba9f961af3c260ff580490ce02d52067c" dependencies = [ "env_logger 0.9.3", "hex", @@ -8206,18 +7546,19 @@ dependencies = [ "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs 1.5.0", + "zkevm_opcode_defs 0.150.0", ] [[package]] name = "zkevm_circuits" -version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=main#fb3e2574b5c890342518fc930c145443f039a105" +version = "0.140.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6db7061a85757529d06a9cb1c4697902bff16dfb303484499eeb5c7f20e1ac0d" dependencies = [ "arrayvec 0.7.4", "bincode", "boojum", - "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum.git?branch=main)", + "cs_derive", "derivative", "hex", "itertools 0.10.5", @@ -8227,18 +7568,19 @@ dependencies = [ "serde", "serde_json", "smallvec", - "zkevm_opcode_defs 1.3.2", + "zkevm_opcode_defs 0.132.0", ] [[package]] name = "zkevm_circuits" -version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.4.0#fb3e2574b5c890342518fc930c145443f039a105" +version = "0.141.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25e0f6e554b88310ad3b086e5334fbebe27154674a91c91643241b64c3d05b3a" dependencies = [ "arrayvec 0.7.4", "bincode", "boojum", - "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum.git?branch=main)", + "cs_derive", "derivative", "hex", "itertools 0.10.5", @@ -8248,18 +7590,18 @@ dependencies = [ "serde", "serde_json", "smallvec", - "zkevm_opcode_defs 1.3.2", + "zkevm_opcode_defs 0.141.0", ] [[package]] name = "zkevm_circuits" -version = "1.4.1" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.4.1#8bf24543ffc5bafab34182388394e887ecb37d17" +version = "0.150.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4691ca0faeb666120ad48fb1a45750c5bacc90118a851f4450f3e1e903f9b2e3" dependencies = [ "arrayvec 0.7.4", - "bincode", "boojum", - "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum.git?branch=main)", + "cs_derive", "derivative", "hex", "itertools 0.10.5", @@ -8267,65 +7609,65 @@ dependencies = [ "rand 0.8.5", "seq-macro", "serde", - "serde_json", "smallvec", - "zkevm_opcode_defs 1.4.1", + "zkevm_opcode_defs 0.150.0", ] [[package]] -name = "zkevm_circuits" -version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.5.0#b7a86c739e8a8f88e788e90893c6e7496f6d7dfc" +name = "zkevm_opcode_defs" +version = "0.131.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49e0154bd4ae8202c96c52b29dd44f944bfd08c1c233fef843744463964de957" dependencies = [ - "arrayvec 0.7.4", - "boojum", - "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum.git?branch=main)", - "derivative", - "hex", - "itertools 0.10.5", - "rand 0.4.6", - "rand 0.8.5", - "seq-macro", - "serde", - "smallvec", - "zkevm_opcode_defs 1.5.0", + "bitflags 1.3.2", + "ethereum-types", + "lazy_static", + "sha2 0.10.8", ] [[package]] name = "zkevm_opcode_defs" -version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.1#00d4ad2292bd55374a0fa10fe11686d7a109d8a0" +version = "0.132.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", + "blake2 0.10.6", "ethereum-types", + "k256 0.11.6", "lazy_static", - "sha2 0.10.8", + "sha2_ce", + "sha3_ce", ] [[package]] name = "zkevm_opcode_defs" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#dffacadeccdfdbff4bc124d44c595c4a6eae5013" +version = "0.141.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6be7bd5f0e0b61211f544147289640b4712715589d7f2fe5229d92a7a3ac64c0" dependencies = [ "bitflags 2.5.0", - "blake2 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e)", + 
"blake2 0.10.6", "ethereum-types", - "k256 0.11.6", + "k256 0.13.3", "lazy_static", - "sha2 0.10.6", - "sha3 0.10.6", + "sha2 0.10.8", + "sha3 0.10.8", ] [[package]] name = "zkevm_opcode_defs" -version = "1.4.1" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.4.1#ba8228ff0582d21f64d6a319d50d0aec48e9e7b6" +version = "0.150.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3328c012d444bdbfadb754a72c01a56879eb66584efc71eac457e89e7843608" dependencies = [ "bitflags 2.5.0", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2 0.10.6", "ethereum-types", "k256 0.13.3", "lazy_static", + "p256", + "serde", "sha2 0.10.8", "sha3 0.10.8", ] @@ -8336,7 +7678,7 @@ version = "1.5.0" source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.5.0#28d2edabf902ea9b08f6a26a4506831fd89346b9" dependencies = [ "bitflags 2.5.0", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2 0.10.6", "ethereum-types", "k256 0.13.3", "lazy_static", @@ -8348,83 +7690,95 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.3.3#aba8f2a32767b79838aca7d7d00d9d23144df32f" +version = "0.140.0-gpu-wrapper.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6c5aaadac549dbc474a5d590d897548cb3587a119d9e48b8014cd4b6dc0bcc" dependencies = [ "bincode", - "circuit_sequencer_api 0.1.0", - "circuit_testing", - "codegen 0.2.0", + "circuit_definitions 0.140.0-gpu-wrapper.0", + "codegen", "crossbeam 0.8.4", "derivative", - "env_logger 0.11.3", + "env_logger 0.9.3", "hex", - "num-bigint 0.4.5", - "num-integer", - "num-traits", + "rand 0.4.6", "rayon", "serde", "serde_json", "smallvec", "structopt", - "sync_vm", "test-log", "tracing", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)", - "zkevm-assembly 1.3.2", + "zkevm-assembly 0.132.0", ] [[package]] name = "zkevm_test_harness" -version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness?branch=gpu-wrapper#ea0d54f6d5d7d3302a4a6594150a2ca809e6677b" +version = "0.150.2-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fdbf14a5793a23aec1b315680b152413a477c8243b7c23a9acf743471b313e4" dependencies = [ "bincode", - "circuit_definitions 0.1.0", - "codegen 0.2.0", + "circuit_definitions 0.150.2-rc.1", + "circuit_sequencer_api 0.150.2-rc.1", + "codegen", "crossbeam 0.8.4", "derivative", - "env_logger 0.11.3", + "env_logger 0.9.3", "hex", "rand 0.4.6", "rayon", + "regex", "serde", "serde_json", "smallvec", "structopt", "test-log", "tracing", - "zkevm-assembly 1.3.2", + "zkevm-assembly 0.150.0", + "zksync_kzg", ] [[package]] -name = "zkevm_test_harness" -version = "1.5.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.5.0#5d6a06c37f4656d26a4170d22f2298cd7716c070" +name = "zksync-gpu-ffi" +version = "0.140.0-gpu-wrapper.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bff4168aca5a3b1ee07abf23f7af95c48b78c50e0c8dac3a383c834eb020300" dependencies = [ - "bincode", - "circuit_definitions 1.5.0", - "circuit_sequencer_api 0.1.50", - "codegen 0.2.0", + "bindgen 0.59.2", "crossbeam 0.8.4", - "curl", "derivative", - "env_logger 0.11.3", - "hex", - "kzg", - "lazy_static", + "futures 0.3.30", + "futures-locks", + "num_cpus", +] + +[[package]] +name = 
"zksync-gpu-prover" +version = "0.140.0-gpu-wrapper.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e2ee87fbdf2f52de4b22bd5f1004c6323f8a524eadc571a4d5d1a16cfd9c102" +dependencies = [ + "bit-vec", + "cfg-if 1.0.0", + "crossbeam 0.8.4", + "franklin-crypto", + "itertools 0.10.5", + "num_cpus", "rand 0.4.6", - "rayon", - "regex", - "reqwest", "serde", - "serde_json", - "smallvec", - "structopt", - "test-log", - "tracing", - "walkdir", - "zkevm-assembly 1.5.0", + "zksync-gpu-ffi", +] + +[[package]] +name = "zksync-wrapper-prover" +version = "0.140.0-gpu-wrapper.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59606513a9d32195b62c775141483da0eda06181d0571e9bd537e679308156d2" +dependencies = [ + "circuit_definitions 0.140.0-gpu-wrapper.0", + "zkevm_test_harness 0.140.0-gpu-wrapper.0", + "zksync-gpu-prover", ] [[package]] @@ -8445,52 +7799,11 @@ dependencies = [ "url", ] -[[package]] -name = "zksync_circuit_breaker" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "thiserror", - "tokio", - "tracing", - "vise", - "zksync_config", - "zksync_dal", -] - -[[package]] -name = "zksync_commitment_generator" -version = "0.1.0" -dependencies = [ - "anyhow", - "circuit_sequencer_api 0.1.40", - "circuit_sequencer_api 0.1.41", - "circuit_sequencer_api 0.1.50", - "futures 0.3.30", - "itertools 0.10.5", - "multivm", - "num_cpus", - "serde_json", - "tokio", - "tracing", - "vise", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zk_evm 1.4.1", - "zk_evm 1.5.0", - "zksync_contracts", - "zksync_dal", - "zksync_eth_client", - "zksync_health_check", - "zksync_l1_contract_interface", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_concurrency" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50302b77192891256d180ff2551dc0c3bc4144958b49e9a16c50a0dc218958ba" dependencies = [ "anyhow", "once_cell", @@ -8513,45 +7826,29 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "url", "zksync_basic_types", + "zksync_concurrency", "zksync_consensus_utils", "zksync_crypto_primitives", ] [[package]] -name = "zksync_consensus_bft" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" -dependencies = [ - "anyhow", - "async-trait", - "once_cell", - "rand 0.8.5", - "thiserror", - "tracing", - "vise", - "zksync_concurrency", - "zksync_consensus_crypto", - "zksync_consensus_network", - "zksync_consensus_roles", - "zksync_consensus_storage", - "zksync_consensus_utils", - "zksync_protobuf", -] - -[[package]] -name = "zksync_consensus_crypto" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +name = "zksync_consensus_crypto" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5cb8ed0d59593f6147085b77142628e459ba673aa4d48fce064d5b96e31eb36" dependencies = [ "anyhow", "blst", "ed25519-dalek", + "elliptic-curve 0.13.8", "ff_ce", "hex", + "k256 0.13.3", "num-bigint 0.4.5", "num-traits", - "pairing_ce 0.28.5 
(git+https://github.com/matter-labs/pairing.git?rev=d24f2c5871089c4cd4f54c0ca266bb9fef6115eb)", + "pairing_ce", "rand 0.4.6", "rand 0.8.5", "sha3 0.10.8", @@ -8560,54 +7857,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "zksync_consensus_executor" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" -dependencies = [ - "anyhow", - "rand 0.8.5", - "tracing", - "vise", - "zksync_concurrency", - "zksync_consensus_bft", - "zksync_consensus_crypto", - "zksync_consensus_network", - "zksync_consensus_roles", - "zksync_consensus_storage", - "zksync_consensus_utils", - "zksync_protobuf", -] - -[[package]] -name = "zksync_consensus_network" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" -dependencies = [ - "anyhow", - "async-trait", - "im", - "once_cell", - "pin-project", - "prost 0.12.6", - "rand 0.8.5", - "snow", - "thiserror", - "tracing", - "vise", - "zksync_concurrency", - "zksync_consensus_crypto", - "zksync_consensus_roles", - "zksync_consensus_storage", - "zksync_consensus_utils", - "zksync_protobuf", - "zksync_protobuf_build", -] - [[package]] name = "zksync_consensus_roles" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ffe3e47d99eb943eb94f2f5c9d929b1192bf3e8d1434de0fa6f0090f9c1197e" dependencies = [ "anyhow", "bit-vec", @@ -8627,8 +7881,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ae9a0ec64ce9c0af346e50cc87dc257c30259101ce9675b408cb883e096087" dependencies = [ "anyhow", "async-trait", @@ -8638,6 +7893,7 @@ dependencies = [ "tracing", "vise", "zksync_concurrency", + "zksync_consensus_crypto", "zksync_consensus_roles", "zksync_protobuf", "zksync_protobuf_build", @@ -8645,31 +7901,16 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24dc6135abeefa80f617eb2903fe43d137d362bf673f0651b4894b17069d1fb1" dependencies = [ + "anyhow", "rand 0.8.5", "thiserror", "zksync_concurrency", ] -[[package]] -name = "zksync_contract_verification_server" -version = "0.1.0" -dependencies = [ - "anyhow", - "axum", - "serde", - "serde_json", - "tokio", - "tower-http", - "tracing", - "vise", - "zksync_config", - "zksync_dal", - "zksync_types", -] - [[package]] name = "zksync_contracts" version = "0.1.0" @@ -8688,98 +7929,15 @@ name = "zksync_core_leftovers" version = "0.1.0" dependencies = [ "anyhow", - "async-trait", - "axum", - "chrono", "ctrlc", - "dashmap", - "futures 0.3.30", - "governor", - "hex", - "itertools 0.10.5", - "lru", - "multivm", - "once_cell", - "pin-project-lite", - "prometheus_exporter", - "prost 0.12.6", - "prover_dal", - "rand 0.8.5", - "reqwest", - 
"secrecy", - "serde", - "serde_json", "serde_yaml", - "thiserror", - "thread_local", "tokio", - "tower", - "tower-http", - "tracing", - "vise", - "vlog", - "vm_utils", - "zksync_circuit_breaker", - "zksync_commitment_generator", - "zksync_concurrency", "zksync_config", - "zksync_consensus_bft", - "zksync_consensus_crypto", - "zksync_consensus_executor", - "zksync_consensus_network", - "zksync_consensus_roles", - "zksync_consensus_storage", - "zksync_consensus_utils", - "zksync_contract_verification_server", - "zksync_contracts", "zksync_dal", - "zksync_db_connection", - "zksync_eth_client", - "zksync_eth_sender", - "zksync_eth_signer", - "zksync_eth_watch", - "zksync_health_check", - "zksync_house_keeper", - "zksync_l1_contract_interface", - "zksync_mempool", - "zksync_merkle_tree", - "zksync_metadata_calculator", - "zksync_mini_merkle_tree", - "zksync_node_api_server", - "zksync_node_consensus", - "zksync_node_fee_model", + "zksync_env_config", "zksync_node_genesis", - "zksync_node_sync", - "zksync_object_store", - "zksync_proof_data_handler", "zksync_protobuf", - "zksync_protobuf_build", "zksync_protobuf_config", - "zksync_prover_interface", - "zksync_queued_job_processor", - "zksync_shared_metrics", - "zksync_state", - "zksync_state_keeper", - "zksync_storage", - "zksync_system_constants", - "zksync_tee_verifier", - "zksync_tee_verifier_input_producer", - "zksync_types", - "zksync_utils", - "zksync_web3_decl", -] - -[[package]] -name = "zksync_crypto" -version = "0.1.0" -dependencies = [ - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", - "hex", - "once_cell", - "serde", - "sha2 0.10.8", - "thiserror", - "zksync_basic_types", ] [[package]] @@ -8787,11 +7945,13 @@ name = "zksync_crypto_primitives" version = "0.1.0" dependencies = [ "anyhow", + "blake2 0.10.6", "hex", "rand 0.8.5", "secp256k1", "serde", "serde_json", + "sha2 0.10.8", "thiserror", "zksync_basic_types", "zksync_utils", @@ -8873,30 +8033,6 @@ dependencies = [ "zksync_web3_decl", ] -[[package]] -name = "zksync_eth_sender" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "chrono", - "thiserror", - "tokio", - "tracing", - "vise", - "zksync_config", - "zksync_contracts", - "zksync_dal", - "zksync_eth_client", - "zksync_l1_contract_interface", - "zksync_node_fee_model", - "zksync_object_store", - "zksync_prover_interface", - "zksync_shared_metrics", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_eth_signer" version = "0.1.0" @@ -8907,24 +8043,6 @@ dependencies = [ "zksync_types", ] -[[package]] -name = "zksync_eth_watch" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "thiserror", - "tokio", - "tracing", - "vise", - "zksync_contracts", - "zksync_dal", - "zksync_eth_client", - "zksync_shared_metrics", - "zksync_system_constants", - "zksync_types", -] - [[package]] name = "zksync_health_check" version = "0.1.0" @@ -8940,41 +8058,20 @@ dependencies = [ ] [[package]] -name = "zksync_house_keeper" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "prover_dal", - "tokio", - "tracing", - "vise", - "zksync_config", - "zksync_dal", - "zksync_shared_metrics", - "zksync_types", -] - -[[package]] -name = "zksync_l1_contract_interface" -version = "0.1.0" +name = "zksync_kzg" +version = "0.150.2-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4672556b6bc06da9dcd38a607e139b8eb3083edfaabcd12981e8a62051ee1f81" dependencies = [ - "codegen 0.1.0", + "boojum", + "derivative", "hex", - "kzg", "once_cell", - "sha2 
0.10.8", - "sha3 0.10.8", - "zksync_prover_interface", - "zksync_types", -] - -[[package]] -name = "zksync_mempool" -version = "0.1.0" -dependencies = [ - "tracing", - "zksync_types", + "rayon", + "serde", + "serde_json", + "serde_with", + "zkevm_circuits 0.150.0", ] [[package]] @@ -8989,143 +8086,51 @@ dependencies = [ "thread_local", "tracing", "vise", - "zksync_crypto", + "zksync_crypto_primitives", "zksync_prover_interface", "zksync_storage", "zksync_types", "zksync_utils", ] -[[package]] -name = "zksync_metadata_calculator" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "axum", - "futures 0.3.30", - "itertools 0.10.5", - "once_cell", - "reqwest", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "vise", - "zksync_config", - "zksync_crypto", - "zksync_dal", - "zksync_health_check", - "zksync_merkle_tree", - "zksync_object_store", - "zksync_shared_metrics", - "zksync_storage", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_mini_merkle_tree" version = "0.1.0" dependencies = [ "once_cell", "zksync_basic_types", - "zksync_crypto", + "zksync_crypto_primitives", ] [[package]] -name = "zksync_node_api_server" +name = "zksync_multivm" version = "0.1.0" dependencies = [ "anyhow", - "async-trait", - "axum", - "chrono", - "futures 0.3.30", - "governor", + "circuit_sequencer_api 0.133.0", + "circuit_sequencer_api 0.140.0", + "circuit_sequencer_api 0.141.0", + "circuit_sequencer_api 0.142.0", + "circuit_sequencer_api 0.150.2-rc.1", "hex", - "http", "itertools 0.10.5", - "lru", - "multivm", "once_cell", - "pin-project-lite", - "rand 0.8.5", + "pretty_assertions", "serde", - "serde_json", "thiserror", - "thread_local", - "tokio", - "tower", - "tower-http", "tracing", "vise", - "zksync_config", + "vm2", + "zk_evm 0.131.0-rc.2", + "zk_evm 0.133.0", + "zk_evm 0.140.0", + "zk_evm 0.141.0", + "zk_evm 0.150.0", "zksync_contracts", - "zksync_dal", - "zksync_health_check", - "zksync_metadata_calculator", - "zksync_mini_merkle_tree", - "zksync_node_fee_model", - "zksync_node_sync", - "zksync_protobuf", - "zksync_shared_metrics", - "zksync_state", - "zksync_state_keeper", - "zksync_system_constants", - "zksync_types", - "zksync_utils", - "zksync_web3_decl", -] - -[[package]] -name = "zksync_node_consensus" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "secrecy", - "tempfile", - "tracing", - "zksync_concurrency", - "zksync_config", - "zksync_consensus_bft", - "zksync_consensus_crypto", - "zksync_consensus_executor", - "zksync_consensus_network", - "zksync_consensus_roles", - "zksync_consensus_storage", - "zksync_consensus_utils", - "zksync_dal", - "zksync_l1_contract_interface", - "zksync_merkle_tree", - "zksync_metadata_calculator", - "zksync_node_sync", - "zksync_protobuf", "zksync_state", - "zksync_state_keeper", "zksync_system_constants", "zksync_types", "zksync_utils", - "zksync_web3_decl", -] - -[[package]] -name = "zksync_node_fee_model" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "tokio", - "tracing", - "vise", - "zksync_config", - "zksync_dal", - "zksync_eth_client", - "zksync_types", - "zksync_utils", - "zksync_web3_decl", ] [[package]] @@ -9134,7 +8139,6 @@ version = "0.1.0" dependencies = [ "anyhow", "itertools 0.10.5", - "multivm", "thiserror", "tokio", "tracing", @@ -9144,50 +8148,7 @@ dependencies = [ "zksync_dal", "zksync_eth_client", "zksync_merkle_tree", - "zksync_system_constants", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "zksync_node_sync" -version = "0.1.0" 
-dependencies = [ - "anyhow", - "async-trait", - "chrono", - "futures 0.3.30", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "vise", - "vm_utils", - "zksync_concurrency", - "zksync_config", - "zksync_contracts", - "zksync_dal", - "zksync_eth_client", - "zksync_health_check", - "zksync_node_genesis", - "zksync_shared_metrics", - "zksync_state_keeper", - "zksync_system_constants", - "zksync_types", - "zksync_utils", - "zksync_web3_decl", -] - -[[package]] -name = "zksync_node_test_utils" -version = "0.1.0" -dependencies = [ - "multivm", - "zksync_contracts", - "zksync_dal", - "zksync_merkle_tree", - "zksync_node_genesis", + "zksync_multivm", "zksync_system_constants", "zksync_types", "zksync_utils", @@ -9203,10 +8164,10 @@ dependencies = [ "flate2", "google-cloud-auth", "google-cloud-storage", - "http", + "http 1.1.0", "prost 0.12.6", "rand 0.8.5", - "reqwest", + "reqwest 0.12.5", "serde_json", "tokio", "tracing", @@ -9216,22 +8177,6 @@ dependencies = [ "zksync_types", ] -[[package]] -name = "zksync_proof_data_handler" -version = "0.1.0" -dependencies = [ - "anyhow", - "axum", - "tokio", - "tracing", - "zksync_config", - "zksync_dal", - "zksync_object_store", - "zksync_prover_interface", - "zksync_tee_verifier", - "zksync_types", -] - [[package]] name = "zksync_proof_fri_compressor" version = "0.1.0" @@ -9239,12 +8184,11 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.1.50", + "circuit_sequencer_api 0.150.2-rc.1", + "clap 4.5.4", "ctrlc", "futures 0.3.30", - "prometheus_exporter", - "prover_dal", - "reqwest", + "reqwest 0.12.5", "serde", "serde_json", "structopt", @@ -9252,24 +8196,25 @@ dependencies = [ "tracing", "vise", "vk_setup_data_generator_server_fri", - "vlog", - "wrapper-prover", - "zkevm_test_harness 1.3.3", - "zkevm_test_harness 1.5.0", - "zksync_config", + "zkevm_test_harness 0.150.2-rc.1", + "zksync-wrapper-prover", + "zksync_core_leftovers", "zksync_env_config", "zksync_object_store", + "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_interface", "zksync_queued_job_processor", "zksync_types", "zksync_utils", + "zksync_vlog", ] [[package]] name = "zksync_protobuf" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e7c7820f290db565a1b4ff73aa1175cd7d31498fca8d859eb5aceebd33468c" dependencies = [ "anyhow", "bit-vec", @@ -9288,8 +8233,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=3e6f101ee4124308c4c974caaa259d524549b0c6#3e6f101ee4124308c4c974caaa259d524549b0c6" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6cafeec1150ae91f1a37c8f0dce6b71b92b93e0c4153d32b4c37e2fd71bce2f" dependencies = [ "anyhow", "heck 0.5.0", @@ -9321,14 +8267,13 @@ dependencies = [ ] [[package]] -name = "zksync_prover_config" +name = "zksync_prover_dal" version = "0.1.0" dependencies = [ - "anyhow", - "zksync_config", - "zksync_core_leftovers", - "zksync_env_config", - "zksync_protobuf_config", + "sqlx", + "strum", + "zksync_basic_types", + "zksync_db_connection", ] [[package]] @@ -9337,30 +8282,31 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "circuit_definitions 1.5.0", + "circuit_definitions 0.150.2-rc.1", + "clap 4.5.4", "ctrlc", "futures 
0.3.30", "local-ip-address", - "prometheus_exporter", - "prover_dal", "regex", - "reqwest", + "reqwest 0.12.5", "serde", "shivini", "tokio", "tracing", "vise", "vk_setup_data_generator_server_fri", - "vlog", - "zkevm_test_harness 1.5.0", + "zkevm_test_harness 0.150.2-rc.1", "zksync_config", + "zksync_core_leftovers", "zksync_env_config", "zksync_object_store", + "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_fri_utils", "zksync_queued_job_processor", "zksync_types", "zksync_utils", + "zksync_vlog", ] [[package]] @@ -9373,28 +8319,27 @@ dependencies = [ "ctrlc", "futures 0.3.30", "log", - "prometheus_exporter", - "prover_dal", - "reqwest", + "reqwest 0.12.5", "serde", "tokio", "tracing", "vise", - "vlog", "zksync_config", + "zksync_core_leftovers", "zksync_env_config", "zksync_object_store", - "zksync_prover_config", + "zksync_prover_dal", "zksync_prover_interface", "zksync_types", "zksync_utils", + "zksync_vlog", ] [[package]] name = "zksync_prover_fri_types" version = "0.1.0" dependencies = [ - "circuit_definitions 1.5.0", + "circuit_definitions 0.150.2-rc.1", "serde", "zksync_object_store", "zksync_types", @@ -9405,14 +8350,14 @@ name = "zksync_prover_fri_utils" version = "0.1.0" dependencies = [ "anyhow", - "prover_dal", "regex", - "reqwest", + "reqwest 0.12.5", "serde", "tracing", "vise", "zksync_config", "zksync_object_store", + "zksync_prover_dal", "zksync_prover_fri_types", "zksync_types", "zksync_utils", @@ -9423,11 +8368,13 @@ name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "chrono", - "circuit_sequencer_api 0.1.50", + "circuit_sequencer_api 0.150.2-rc.1", "serde", "serde_with", "strum", + "zksync_multivm", "zksync_object_store", + "zksync_state", "zksync_types", ] @@ -9460,6 +8407,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "backon", "chrono", "itertools 0.10.5", "mini-moka", @@ -9474,38 +8422,6 @@ dependencies = [ "zksync_utils", ] -[[package]] -name = "zksync_state_keeper" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "futures 0.3.30", - "hex", - "itertools 0.10.5", - "multivm", - "once_cell", - "thiserror", - "tokio", - "tracing", - "vise", - "vm_utils", - "zksync_config", - "zksync_contracts", - "zksync_dal", - "zksync_mempool", - "zksync_node_fee_model", - "zksync_node_genesis", - "zksync_node_test_utils", - "zksync_protobuf", - "zksync_shared_metrics", - "zksync_state", - "zksync_storage", - "zksync_test_account", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_storage" version = "0.1.0" @@ -9527,67 +8443,13 @@ dependencies = [ "zksync_utils", ] -[[package]] -name = "zksync_tee_verifier" -version = "0.1.0" -dependencies = [ - "anyhow", - "multivm", - "serde", - "tracing", - "vm_utils", - "zksync_config", - "zksync_crypto", - "zksync_dal", - "zksync_db_connection", - "zksync_merkle_tree", - "zksync_object_store", - "zksync_prover_interface", - "zksync_queued_job_processor", - "zksync_state", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "zksync_tee_verifier_input_producer" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "tokio", - "tracing", - "vise", - "vm_utils", - "zksync_dal", - "zksync_object_store", - "zksync_prover_interface", - "zksync_queued_job_processor", - "zksync_tee_verifier", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "zksync_test_account" -version = "0.1.0" -dependencies = [ - "ethabi", - "hex", - "rand 0.8.5", - "zksync_contracts", - "zksync_eth_signer", - "zksync_system_constants", - "zksync_types", - 
"zksync_utils", -] - [[package]] name = "zksync_types" version = "0.1.0" dependencies = [ "anyhow", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "bigdecimal", + "blake2 0.10.6", "chrono", "derive_more", "hex", @@ -9602,6 +8464,7 @@ dependencies = [ "serde_json", "strum", "thiserror", + "tracing", "zksync_basic_types", "zksync_config", "zksync_contracts", @@ -9624,15 +8487,35 @@ dependencies = [ "itertools 0.10.5", "num", "once_cell", - "reqwest", + "reqwest 0.12.5", "serde", "serde_json", "thiserror", "tokio", "tracing", - "vlog", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", + "zk_evm 0.133.0", "zksync_basic_types", + "zksync_vlog", +] + +[[package]] +name = "zksync_vlog" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "opentelemetry", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "sentry", + "serde", + "serde_json", + "tokio", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", + "vise", + "vise-exporter", ] [[package]] @@ -9645,6 +8528,7 @@ dependencies = [ "jsonrpsee", "pin-project-lite", "rlp", + "rustls", "serde", "serde_json", "thiserror", @@ -9662,15 +8546,12 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_definitions 1.5.0", + "circuit_definitions 0.150.2-rc.1", "const-decoder", "ctrlc", "futures 0.3.30", "jemallocator", - "multivm", "once_cell", - "prometheus_exporter", - "prover_dal", "rand 0.8.5", "serde", "structopt", @@ -9679,16 +8560,13 @@ dependencies = [ "tracing", "vise", "vk_setup_data_generator_server_fri", - "vlog", - "zk_evm 1.4.1", - "zkevm_test_harness 1.5.0", + "zkevm_test_harness 0.150.2-rc.1", "zksync_config", "zksync_core_leftovers", - "zksync_dal", "zksync_env_config", + "zksync_multivm", "zksync_object_store", - "zksync_protobuf_config", - "zksync_prover_config", + "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_fri_utils", "zksync_prover_interface", @@ -9697,6 +8575,7 @@ dependencies = [ "zksync_system_constants", "zksync_types", "zksync_utils", + "zksync_vlog", ] [[package]] @@ -9706,44 +8585,23 @@ dependencies = [ "anyhow", "async-trait", "bincode", + "clap 4.5.4", "ctrlc", - "futures 0.3.30", - "prometheus_exporter", - "prover_dal", - "queues", - "serde", - "structopt", "tokio", "tracing", "vise", "vk_setup_data_generator_server_fri", - "vlog", "zksync_config", + "zksync_core_leftovers", "zksync_env_config", "zksync_object_store", + "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_fri_utils", "zksync_queued_job_processor", "zksync_types", "zksync_utils", -] - -[[package]] -name = "zstd" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "7.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" -dependencies = [ - "zstd-sys", + "zksync_vlog", ] [[package]] diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 87021c27a7fb..c06c0774639a 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -1,19 +1,7 @@ [workspace] members = [ - # lib - "prover_fri_utils", - "prover_fri_types", - "prover_dal", - # binaries - "witness_generator", - "vk_setup_data_generator_server_fri", - "prover_fri", - "witness_vector_generator", - "prover_fri_gateway", - "proof_fri_compressor", - "prover_cli", - "prover_version", - 
"config", + "crates/bin/*", + "crates/lib/*", ] resolver = "2" @@ -29,11 +17,11 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [workspace.dependencies] +# Common dependencies anyhow = "1.0" async-trait = "0.1" bincode = "1" -circuit_definitions = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.5.0" } -circuit_sequencer_api = { package = "circuit_sequencer_api", git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.5.0" } +chrono = "0.4.38" clap = "4.4.6" colored = "2.0" const-decoder = "0.3.0" @@ -47,20 +35,16 @@ jemallocator = "0.5" local-ip-address = "0.5.0" log = "0.4.20" md5 = "0.7.0" -multivm = { path = "../core/lib/multivm", version = "0.1.0" } once_cell = "1.18" -prometheus_exporter = { path = "../core/lib/prometheus_exporter" } proptest = "1.2.0" -prover_dal = { path = "prover_dal" } queues = "1.1.0" rand = "0.8" regex = "1.10.4" -reqwest = "0.11" +reqwest = "0.12" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" sha3 = "0.10.8" -shivini = { git = "https://github.com/matter-labs/era-shivini.git", branch = "v1.5.0" } sqlx = { version = "0.7.3", default-features = false } structopt = "0.3.26" strum = { version = "0.24" } @@ -69,21 +53,26 @@ tokio = "1" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = { version = "0.3" } -vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "a5bb80c9ce7168663114ee30e794d6dc32159ee4" } -vk_setup_data_generator_server_fri = { path = "vk_setup_data_generator_server_fri" } -zksync_prover_config = { path = "config" } -vlog = { path = "../core/lib/vlog" } -zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.1" } -zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.5.0" } -zkevm_test_harness_1_3_3 = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3", package = "zkevm_test_harness" } +vise = "0.1.0" + +# Proving dependencies +circuit_definitions = "=0.150.2-rc.1" +circuit_sequencer_api = "=0.150.2-rc.1" +zkevm_test_harness = "=0.150.2-rc.1" + +# GPU proving dependencies +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.140.0-gpu-wrapper.0" } +shivini = "=0.150.2-rc.1" + +# Core workspace dependencies +zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } +zksync_vlog = { path = "../core/lib/vlog" } zksync_basic_types = { path = "../core/lib/basic_types" } zksync_config = { path = "../core/lib/config" } zksync_dal = { path = "../core/lib/dal" } zksync_db_connection = { path = "../core/lib/db_connection" } zksync_env_config = { path = "../core/lib/env_config" } zksync_object_store = { path = "../core/lib/object_store" } -zksync_prover_fri_types = { path = "prover_fri_types" } -zksync_prover_fri_utils = { path = "prover_fri_utils" } zksync_prover_interface = { path = "../core/lib/prover_interface" } zksync_queued_job_processor = { path = "../core/lib/queued_job_processor" } zksync_state = { path = "../core/lib/state" } @@ -93,10 +82,12 @@ zksync_utils = { path = "../core/lib/utils" } zksync_eth_client = { path = "../core/lib/eth_client" } zksync_contracts = { path = "../core/lib/contracts" } zksync_core_leftovers = { path = "../core/lib/zksync_core_leftovers" } -zksync_protobuf_config = { path = "../core/lib/protobuf_config" } - -wrapper_prover = { package = "wrapper-prover", git = "https://github.com/matter-labs/era-heavy-ops-service.git", rev = "3d33e06" } +# Prover workspace dependencies 
+zksync_prover_dal = { path = "crates/lib/prover_dal" }
+zksync_prover_fri_types = { path = "crates/lib/prover_fri_types" }
+zksync_prover_fri_utils = { path = "crates/lib/prover_fri_utils" }
+vk_setup_data_generator_server_fri = { path = "crates/bin/vk_setup_data_generator_server_fri" }

 # for `perf` profiling
 [profile.perf]
diff --git a/prover/config/src/lib.rs b/prover/config/src/lib.rs
deleted file mode 100644
index f501dd2d6e06..000000000000
--- a/prover/config/src/lib.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-use anyhow::Context;
-use zksync_config::{
-    configs::{
-        api::{HealthCheckConfig, MerkleTreeApiConfig, Web3JsonRpcConfig},
-        chain::{
-            CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig,
-            StateKeeperConfig,
-        },
-        fri_prover_group::FriProverGroupConfig,
-        house_keeper::HouseKeeperConfig,
-        DatabaseSecrets, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig,
-        FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig,
-        ObjectStoreConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig,
-        ProtectiveReadsWriterConfig,
-    },
-    ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig,
-    PostgresConfig, SnapshotsCreatorConfig,
-};
-use zksync_core_leftovers::temp_config_store::{decode_yaml_repr, TempConfigStore};
-use zksync_env_config::FromEnv;
-use zksync_protobuf_config::proto::secrets::Secrets;
-
-fn load_env_config() -> anyhow::Result<TempConfigStore> {
-    Ok(TempConfigStore {
-        postgres_config: PostgresConfig::from_env().ok(),
-        health_check_config: HealthCheckConfig::from_env().ok(),
-        merkle_tree_api_config: MerkleTreeApiConfig::from_env().ok(),
-        web3_json_rpc_config: Web3JsonRpcConfig::from_env().ok(),
-        circuit_breaker_config: CircuitBreakerConfig::from_env().ok(),
-        mempool_config: MempoolConfig::from_env().ok(),
-        network_config: NetworkConfig::from_env().ok(),
-        contract_verifier: ContractVerifierConfig::from_env().ok(),
-        operations_manager_config: OperationsManagerConfig::from_env().ok(),
-        state_keeper_config: StateKeeperConfig::from_env().ok(),
-        house_keeper_config: HouseKeeperConfig::from_env().ok(),
-        fri_proof_compressor_config: FriProofCompressorConfig::from_env().ok(),
-        fri_prover_config: FriProverConfig::from_env().ok(),
-        fri_prover_group_config: FriProverGroupConfig::from_env().ok(),
-        fri_prover_gateway_config: FriProverGatewayConfig::from_env().ok(),
-        fri_witness_vector_generator: FriWitnessVectorGeneratorConfig::from_env().ok(),
-        fri_witness_generator_config: FriWitnessGeneratorConfig::from_env().ok(),
-        prometheus_config: PrometheusConfig::from_env().ok(),
-        proof_data_handler_config: ProofDataHandlerConfig::from_env().ok(),
-        api_config: ApiConfig::from_env().ok(),
-        db_config: DBConfig::from_env().ok(),
-        eth_sender_config: EthConfig::from_env().ok(),
-        eth_watch_config: EthWatchConfig::from_env().ok(),
-        gas_adjuster_config: GasAdjusterConfig::from_env().ok(),
-        observability: ObservabilityConfig::from_env().ok(),
-        snapshot_creator: SnapshotsCreatorConfig::from_env().ok(),
-        protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(),
-        core_object_store: ObjectStoreConfig::from_env().ok(),
-    })
-}
-
-pub fn load_general_config(path: Option<std::path::PathBuf>) -> anyhow::Result<GeneralConfig> {
-    match path {
-        Some(path) => {
-            let yaml = std::fs::read_to_string(path).context("Failed to read general config")?;
-            decode_yaml_repr::<zksync_protobuf_config::proto::general::GeneralConfig>(&yaml)
-        }
-        None => Ok(load_env_config()
-            .context("general config from env")?
-            .general()),
-    }
-}
-
-pub fn load_database_secrets(path: Option<std::path::PathBuf>) -> anyhow::Result<DatabaseSecrets> {
-    match path {
-        Some(path) => {
-            let yaml = std::fs::read_to_string(path).context("Failed to read secrets")?;
-            let secrets = decode_yaml_repr::<Secrets>(&yaml).context("Failed to parse secrets")?;
-            Ok(secrets
-                .database
-                .context("failed to parse database secrets")?)
-        }
-        None => DatabaseSecrets::from_env(),
-    }
-}
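The deleted crate above boils down to one pattern: file-based configuration wins when a path is supplied, and environment variables are the fallback (the same helpers now live in `zksync_core_leftovers::temp_config_store`, as the callers below show). A minimal, self-contained sketch of that fallback shape; the `SETTING` variable and `load_setting` helper are invented for illustration:

```rust
use std::path::PathBuf;

// File-based configuration wins when a path is supplied; otherwise fall back
// to the process environment. `SETTING` is an invented variable for the demo.
fn load_setting(path: Option<PathBuf>) -> anyhow::Result<String> {
    match path {
        Some(path) => Ok(std::fs::read_to_string(path)?),
        None => Ok(std::env::var("SETTING")?),
    }
}

fn main() -> anyhow::Result<()> {
    // With no path given, the environment is consulted.
    match load_setting(None) {
        Ok(value) => println!("loaded: {value}"),
        Err(err) => println!("no SETTING in the environment: {err}"),
    }
    Ok(())
}
```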
diff --git a/prover/proof_fri_compressor/Cargo.toml b/prover/crates/bin/proof_fri_compressor/Cargo.toml
similarity index 87%
rename from prover/proof_fri_compressor/Cargo.toml
rename to prover/crates/bin/proof_fri_compressor/Cargo.toml
index dd1aad902da3..0c01a40874f2 100644
--- a/prover/proof_fri_compressor/Cargo.toml
+++ b/prover/crates/bin/proof_fri_compressor/Cargo.toml
@@ -12,19 +12,17 @@ categories.workspace = true
 [dependencies]
 vise.workspace = true
 zksync_types.workspace = true
-prover_dal.workspace = true
-zksync_config.workspace = true
+zksync_prover_dal.workspace = true
 zksync_env_config.workspace = true
 zksync_object_store.workspace = true
 zksync_prover_interface.workspace = true
 zksync_utils.workspace = true
-prometheus_exporter.workspace = true
+zksync_core_leftovers.workspace = true
 zksync_prover_fri_types.workspace = true
 zksync_queued_job_processor.workspace = true
 vk_setup_data_generator_server_fri.workspace = true
-vlog.workspace = true
+zksync_vlog.workspace = true

-zkevm_test_harness_1_3_3.workspace = true
 circuit_sequencer_api.workspace = true
 zkevm_test_harness.workspace = true

@@ -34,6 +32,7 @@ structopt.workspace = true
 tokio = { workspace = true, features = ["time", "macros"] }
 futures = { workspace = true, features = ["compat"] }
 ctrlc = { workspace = true, features = ["termination"] }
+clap = { workspace = true, features = ["derive"] }
 async-trait.workspace = true
 bincode.workspace = true
 reqwest = { workspace = true, features = ["blocking"] }
@@ -42,5 +41,6 @@ serde = { workspace = true, features = ["derive"] }
 wrapper_prover = { workspace = true, optional = true }

 [features]
+default = []
 gpu = ["wrapper_prover"]

diff --git a/prover/proof_fri_compressor/README.md b/prover/crates/bin/proof_fri_compressor/README.md
similarity index 100%
rename from prover/proof_fri_compressor/README.md
rename to prover/crates/bin/proof_fri_compressor/README.md
diff --git a/prover/proof_fri_compressor/src/compressor.rs b/prover/crates/bin/proof_fri_compressor/src/compressor.rs
similarity index 79%
rename from prover/proof_fri_compressor/src/compressor.rs
rename to prover/crates/bin/proof_fri_compressor/src/compressor.rs
index aba03a61497b..0d9083a57c5c 100644
--- a/prover/proof_fri_compressor/src/compressor.rs
+++ b/prover/crates/bin/proof_fri_compressor/src/compressor.rs
@@ -3,7 +3,6 @@ use std::{sync::Arc, time::Instant};
 use anyhow::Context as _;
 use async_trait::async_trait;
 use circuit_sequencer_api::proof::FinalProof;
-use prover_dal::{ConnectionPool, Prover, ProverDal};
 use tokio::task::JoinHandle;
 #[cfg(feature = "gpu")]
 use wrapper_prover::{Bn256, GPUWrapperConfigs, WrapperProver, DEFAULT_WRAPPER_CONFIG};
@@ -11,18 +10,8 @@ use wrapper_prover::{Bn256, GPUWrapperConfigs, WrapperProver, DEFAULT_WRAPPER_CO
 use zkevm_test_harness::proof_wrapper_utils::WrapperConfig;
 #[allow(unused_imports)]
 use zkevm_test_harness::proof_wrapper_utils::{get_trusted_setup, wrap_proof};
-#[cfg(not(feature = "gpu"))]
-use zkevm_test_harness_1_3_3::bellman::bn256::Bn256;
-use zkevm_test_harness_1_3_3::{
-    abstract_zksync_circuit::concrete_circuits::{
-        ZkSyncCircuit, ZkSyncProof,
ZkSyncVerificationKey, - }, - bellman::plonk::better_better_cs::{ - proof::Proof, setup::VerificationKey as SnarkVerificationKey, - }, - witness::oracle::VmWitnessOracle, -}; use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, @@ -44,7 +33,6 @@ pub struct ProofCompressor { blob_store: Arc, pool: ConnectionPool, compression_mode: u8, - verify_wrapper_proof: bool, max_attempts: u32, protocol_version: ProtocolSemanticVersion, } @@ -54,7 +42,6 @@ impl ProofCompressor { blob_store: Arc, pool: ConnectionPool, compression_mode: u8, - verify_wrapper_proof: bool, max_attempts: u32, protocol_version: ProtocolSemanticVersion, ) -> Self { @@ -62,37 +49,14 @@ impl ProofCompressor { blob_store, pool, compression_mode, - verify_wrapper_proof, max_attempts, protocol_version, } } - fn verify_proof(keystore: Keystore, serialized_proof: Vec) -> anyhow::Result<()> { - let proof: Proof>> = - bincode::deserialize(&serialized_proof) - .expect("Failed to deserialize proof with ZkSyncCircuit"); - // We're fetching the key as String and deserializing it here - // as we don't want to include the old version of prover in the main libraries. - let existing_vk_serialized = keystore - .load_snark_verification_key() - .context("get_snark_vk()")?; - let existing_vk = serde_json::from_str::< - SnarkVerificationKey>>, - >(&existing_vk_serialized)?; - - let vk = ZkSyncVerificationKey::from_verification_key_and_numeric_type(0, existing_vk); - let scheduler_proof = ZkSyncProof::from_proof_and_numeric_type(0, proof.clone()); - match vk.verify_proof(&scheduler_proof) { - true => tracing::info!("Compressed proof verified successfully"), - false => anyhow::bail!("Compressed proof verification failed "), - } - Ok(()) - } pub fn compress_proof( proof: ZkSyncRecursionLayerProof, _compression_mode: u8, - verify_wrapper_proof: bool, ) -> anyhow::Result { let keystore = Keystore::default(); let scheduler_vk = keystore @@ -126,12 +90,6 @@ impl ProofCompressor { let serialized = bincode::serialize(&wrapper_proof) .expect("Failed to serialize proof with ZkSyncSnarkWrapperCircuit"); - if verify_wrapper_proof { - // If we want to verify the proof, we have to deserialize it, with proper type. - // So that we can pass it into `from_proof_and_numeric_type` method below. - Self::verify_proof(keystore, serialized.clone())?; - } - // For sending to L1, we can use the `FinalProof` type, that has a generic circuit inside, that is not used for serialization. // So `FinalProof` and `Proof>>` are compatible on serialization bytecode level. 
         let final_proof: FinalProof =
@@ -213,11 +171,10 @@ impl JobProcessor for ProofCompressor {
         _started_at: Instant,
     ) -> JoinHandle<anyhow::Result<Self::JobArtifacts>> {
         let compression_mode = self.compression_mode;
-        let verify_wrapper_proof = self.verify_wrapper_proof;
         let block_number = *job_id;
         tokio::task::spawn_blocking(move || {
             let _span = tracing::info_span!("compress", %block_number).entered();
-            Self::compress_proof(job, compression_mode, verify_wrapper_proof)
+            Self::compress_proof(job, compression_mode)
         })
     }

diff --git a/prover/proof_fri_compressor/src/initial_setup_keys.rs b/prover/crates/bin/proof_fri_compressor/src/initial_setup_keys.rs
similarity index 100%
rename from prover/proof_fri_compressor/src/initial_setup_keys.rs
rename to prover/crates/bin/proof_fri_compressor/src/initial_setup_keys.rs
diff --git a/prover/proof_fri_compressor/src/main.rs b/prover/crates/bin/proof_fri_compressor/src/main.rs
similarity index 69%
rename from prover/proof_fri_compressor/src/main.rs
rename to prover/crates/bin/proof_fri_compressor/src/main.rs
index 61b72d790f0a..8be498be5e00 100644
--- a/prover/proof_fri_compressor/src/main.rs
+++ b/prover/crates/bin/proof_fri_compressor/src/main.rs
@@ -1,18 +1,19 @@
+#![allow(incomplete_features)] // We have to use generic const exprs.
 #![feature(generic_const_exprs)]
 use std::{env, time::Duration};

 use anyhow::Context as _;
-use prometheus_exporter::PrometheusExporterConfig;
-use prover_dal::{ConnectionPool, Prover};
-use structopt::StructOpt;
+use clap::Parser;
 use tokio::sync::{oneshot, watch};
-use zksync_config::configs::{DatabaseSecrets, FriProofCompressorConfig, ObservabilityConfig};
-use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv};
+use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config};
+use zksync_env_config::object_store::ProverObjectStoreConfig;
 use zksync_object_store::ObjectStoreFactory;
+use zksync_prover_dal::{ConnectionPool, Prover};
 use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION;
 use zksync_queued_job_processor::JobProcessor;
 use zksync_utils::wait_for_tasks::ManagedTasks;
+use zksync_vlog::prometheus::PrometheusExporterConfig;

 use crate::{
     compressor::ProofCompressor, initial_setup_keys::download_initial_setup_keys_if_not_present,
@@ -22,27 +23,36 @@ mod compressor;
 mod initial_setup_keys;
 mod metrics;

-#[derive(Debug, StructOpt)]
-#[structopt(
-    name = "zksync_proof_fri_compressor",
-    about = "Tool for compressing FRI proofs to old bellman proof"
-)]
-struct Opt {
+#[derive(Debug, Parser)]
+#[command(author = "Matter Labs", version)]
+struct Cli {
     /// Number of times proof fri compressor should be run.
-    #[structopt(short = "n", long = "n_iterations")]
+    #[arg(long = "n_iterations")]
+    #[arg(short)]
     number_of_iterations: Option<usize>,
+    #[arg(long)]
+    pub(crate) config_path: Option<std::path::PathBuf>,
+    #[arg(long)]
+    pub(crate) secrets_path: Option<std::path::PathBuf>,
 }
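Several binaries in this PR make the same StructOpt-to-clap migration, so it helps to see the derive above as a compiling unit. A hedged, self-contained sketch; the field names mirror the diff, while the demo `main` is not part of the PR:

```rust
use clap::Parser;

#[derive(Debug, Parser)]
#[command(author = "Matter Labs", version)]
struct Cli {
    /// Number of times the compressor should run; `short` derives `-n` from the field name.
    #[arg(short, long = "n_iterations")]
    number_of_iterations: Option<usize>,
    #[arg(long)]
    config_path: Option<std::path::PathBuf>,
    #[arg(long)]
    secrets_path: Option<std::path::PathBuf>,
}

fn main() {
    // e.g. `binary -n 5 --config-path general.yaml --secrets-path secrets.yaml`
    let cli = Cli::parse();
    println!("{cli:?}");
}
```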

 #[tokio::main]
 async fn main() -> anyhow::Result<()> {
-    let observability_config =
-        ObservabilityConfig::from_env().context("ObservabilityConfig::from_env()")?;
-    let log_format: vlog::LogFormat = observability_config
+    let opt = Cli::parse();
+
+    let general_config = load_general_config(opt.config_path).context("general config")?;
+    let database_secrets = load_database_secrets(opt.secrets_path).context("database secrets")?;
+
+    let observability_config = general_config
+        .observability
+        .expect("observability config")
+        .clone();
+    let log_format: zksync_vlog::LogFormat = observability_config
         .log_format
         .parse()
         .context("Invalid log format")?;
-    let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format);
+    let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(log_format);
     if let Some(sentry_url) = &observability_config.sentry_url {
         builder = builder
             .with_sentry_url(sentry_url)
@@ -60,15 +70,20 @@
     }
     let _guard = builder.build();

-    let opt = Opt::from_args();
-    let config = FriProofCompressorConfig::from_env().context("FriProofCompressorConfig")?;
-    let database_secrets = DatabaseSecrets::from_env().context("PostgresConfig::from_env()")?;
+    let config = general_config
+        .proof_compressor_config
+        .context("FriProofCompressorConfig")?;
     let pool = ConnectionPool::<Prover>::singleton(database_secrets.prover_url()?)
         .build()
         .await
         .context("failed to build a connection pool")?;
-    let object_store_config =
-        ProverObjectStoreConfig::from_env().context("ProverObjectStoreConfig::from_env()")?;
+    let object_store_config = ProverObjectStoreConfig(
+        general_config
+            .prover_config
+            .expect("ProverConfig")
+            .prover_object_store
+            .context("ProverObjectStoreConfig")?,
+    );
     let blob_store = ObjectStoreFactory::new(object_store_config.0)
         .create_store()
         .await?;
@@ -79,7 +94,6 @@
         blob_store,
         pool,
         config.compression_mode,
-        config.verify_wrapper_proof,
         config.max_attempts,
         protocol_version,
     );
diff --git a/prover/proof_fri_compressor/src/metrics.rs b/prover/crates/bin/proof_fri_compressor/src/metrics.rs
similarity index 100%
rename from prover/proof_fri_compressor/src/metrics.rs
rename to prover/crates/bin/proof_fri_compressor/src/metrics.rs
diff --git a/prover/prover_cli/Cargo.toml b/prover/crates/bin/prover_cli/Cargo.toml
similarity index 92%
rename from prover/prover_cli/Cargo.toml
rename to prover/crates/bin/prover_cli/Cargo.toml
index cca26f76113a..f91cd47e0945 100644
--- a/prover/prover_cli/Cargo.toml
+++ b/prover/crates/bin/prover_cli/Cargo.toml
@@ -25,7 +25,7 @@ zksync_basic_types.workspace = true
 zksync_types.workspace = true
 zksync_prover_fri_types.workspace = true
 zksync_prover_interface.workspace = true
-prover_dal.workspace = true
+zksync_prover_dal.workspace = true
 zksync_eth_client.workspace = true
 zksync_contracts.workspace = true
 zksync_dal.workspace = true
@@ -36,7 +36,8 @@ sqlx.workspace = true
 circuit_definitions.workspace = true
 serde_json.workspace = true
 zkevm_test_harness = { workspace = true, optional = true, features = ["verbose_circuits"] }
+chrono.workspace = true

 [features]
 # enable verbose circuits, if you want to use debug_circuit command (as it is quite heavy dependency).
-verbose_circuits = ["zkevm_test_harness"] \ No newline at end of file +verbose_circuits = ["zkevm_test_harness"] diff --git a/prover/prover_cli/README.md b/prover/crates/bin/prover_cli/README.md similarity index 99% rename from prover/prover_cli/README.md rename to prover/crates/bin/prover_cli/README.md index 053744914b97..6a9091aef25e 100644 --- a/prover/prover_cli/README.md +++ b/prover/crates/bin/prover_cli/README.md @@ -6,7 +6,7 @@ CLI tool for performing maintenance of a ZKsync Prover ``` git clone git@github.com:matter-labs/zksync-era.git -cargo install --path prover/prover_cli/ +cargo install -p prover_cli ``` > This should be `cargo install zksync-prover-cli` or something similar ideally. diff --git a/prover/prover_cli/src/cli.rs b/prover/crates/bin/prover_cli/src/cli.rs similarity index 84% rename from prover/prover_cli/src/cli.rs rename to prover/crates/bin/prover_cli/src/cli.rs index 57422a448881..7174830f44d1 100644 --- a/prover/prover_cli/src/cli.rs +++ b/prover/crates/bin/prover_cli/src/cli.rs @@ -1,12 +1,12 @@ use clap::{command, Args, Parser, Subcommand}; use zksync_types::url::SensitiveUrl; -use crate::commands::{self, config, debug_proof, delete, get_file_info, requeue, restart}; +use crate::commands::{self, config, debug_proof, delete, get_file_info, requeue, restart, stats}; pub const VERSION_STRING: &str = env!("CARGO_PKG_VERSION"); #[derive(Parser)] -#[command(name="prover-cli", version=VERSION_STRING, about, long_about = None)] +#[command(name = "prover-cli", version = VERSION_STRING, about, long_about = None)] struct ProverCLI { #[command(subcommand)] command: ProverCommand, @@ -35,6 +35,8 @@ enum ProverCommand { Status(commands::StatusCommand), Requeue(requeue::Args), Restart(restart::Args), + #[command(about = "Displays L1 Batch proving stats for a given period")] + Stats(stats::Options), } pub async fn start() -> anyhow::Result<()> { @@ -47,6 +49,7 @@ pub async fn start() -> anyhow::Result<()> { ProverCommand::Requeue(args) => requeue::run(args, config).await?, ProverCommand::Restart(args) => restart::run(args).await?, ProverCommand::DebugProof(args) => debug_proof::run(args).await?, + ProverCommand::Stats(args) => stats::run(args, config).await?, }; Ok(()) diff --git a/prover/prover_cli/src/commands/config.rs b/prover/crates/bin/prover_cli/src/commands/config.rs similarity index 100% rename from prover/prover_cli/src/commands/config.rs rename to prover/crates/bin/prover_cli/src/commands/config.rs diff --git a/prover/prover_cli/src/commands/debug_proof.rs b/prover/crates/bin/prover_cli/src/commands/debug_proof.rs similarity index 100% rename from prover/prover_cli/src/commands/debug_proof.rs rename to prover/crates/bin/prover_cli/src/commands/debug_proof.rs diff --git a/prover/prover_cli/src/commands/delete.rs b/prover/crates/bin/prover_cli/src/commands/delete.rs similarity index 97% rename from prover/prover_cli/src/commands/delete.rs rename to prover/crates/bin/prover_cli/src/commands/delete.rs index 7df869b1311b..436bb10e10cb 100644 --- a/prover/prover_cli/src/commands/delete.rs +++ b/prover/crates/bin/prover_cli/src/commands/delete.rs @@ -1,7 +1,7 @@ use anyhow::Context; use clap::Args as ClapArgs; use dialoguer::{theme::ColorfulTheme, Input}; -use prover_dal::{Connection, ConnectionPool, Prover, ProverDal}; +use zksync_prover_dal::{Connection, ConnectionPool, Prover, ProverDal}; use zksync_types::L1BatchNumber; use crate::cli::ProverCLIConfig; diff --git a/prover/prover_cli/src/commands/get_file_info.rs 
b/prover/crates/bin/prover_cli/src/commands/get_file_info.rs
similarity index 100%
rename from prover/prover_cli/src/commands/get_file_info.rs
rename to prover/crates/bin/prover_cli/src/commands/get_file_info.rs
diff --git a/prover/prover_cli/src/commands/mod.rs b/prover/crates/bin/prover_cli/src/commands/mod.rs
similarity index 90%
rename from prover/prover_cli/src/commands/mod.rs
rename to prover/crates/bin/prover_cli/src/commands/mod.rs
index ec58554da508..4bc8b2eb392a 100644
--- a/prover/prover_cli/src/commands/mod.rs
+++ b/prover/crates/bin/prover_cli/src/commands/mod.rs
@@ -1,8 +1,9 @@
+pub(crate) use status::StatusCommand;
 pub(crate) mod config;
 pub(crate) mod debug_proof;
 pub(crate) mod delete;
 pub(crate) mod get_file_info;
 pub(crate) mod requeue;
 pub(crate) mod restart;
+pub(crate) mod stats;
 pub(crate) mod status;
-pub(crate) use status::StatusCommand;
diff --git a/prover/prover_cli/src/commands/requeue.rs b/prover/crates/bin/prover_cli/src/commands/requeue.rs
similarity index 98%
rename from prover/prover_cli/src/commands/requeue.rs
rename to prover/crates/bin/prover_cli/src/commands/requeue.rs
index d529aebcc162..a9d967be5ba9 100644
--- a/prover/prover_cli/src/commands/requeue.rs
+++ b/prover/crates/bin/prover_cli/src/commands/requeue.rs
@@ -1,6 +1,6 @@
 use anyhow::Context;
 use clap::Args as ClapArgs;
-use prover_dal::{ConnectionPool, Prover, ProverDal};
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
 use zksync_types::{basic_fri_types::AggregationRound, prover_dal::StuckJobs, L1BatchNumber};

 use crate::cli::ProverCLIConfig;
diff --git a/prover/prover_cli/src/commands/restart.rs b/prover/crates/bin/prover_cli/src/commands/restart.rs
similarity index 99%
rename from prover/prover_cli/src/commands/restart.rs
rename to prover/crates/bin/prover_cli/src/commands/restart.rs
index 3b9a99c7431f..75beafd7100c 100644
--- a/prover/prover_cli/src/commands/restart.rs
+++ b/prover/crates/bin/prover_cli/src/commands/restart.rs
@@ -1,10 +1,10 @@
 use anyhow::Context;
 use clap::Args as ClapArgs;
-use prover_dal::{
-    fri_witness_generator_dal::FriWitnessJobStatus, Connection, ConnectionPool, Prover, ProverDal,
-};
 use zksync_config::configs::DatabaseSecrets;
 use zksync_env_config::FromEnv;
+use zksync_prover_dal::{
+    fri_witness_generator_dal::FriWitnessJobStatus, Connection, ConnectionPool, Prover, ProverDal,
+};
 use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};

 #[derive(ClapArgs)]
diff --git a/prover/crates/bin/prover_cli/src/commands/stats.rs b/prover/crates/bin/prover_cli/src/commands/stats.rs
new file mode 100644
index 000000000000..307775fa27d3
--- /dev/null
+++ b/prover/crates/bin/prover_cli/src/commands/stats.rs
@@ -0,0 +1,63 @@
+use anyhow::Context;
+use chrono::{self, NaiveTime};
+use clap::{Args, ValueEnum};
+use zksync_basic_types::prover_dal::ProofGenerationTime;
+use zksync_db_connection::connection_pool::ConnectionPool;
+use zksync_prover_dal::{Prover, ProverDal};
+
+use crate::cli::ProverCLIConfig;
+
+#[derive(ValueEnum, Clone)]
+enum StatsPeriod {
+    Day,
+    Week,
+}
+
+#[derive(Args)]
+pub(crate) struct Options {
+    #[clap(
+        short = 'p',
+        long = "period",
+        help = "Specify the time frame to look for stats",
+        default_value = "day"
+    )]
+    period: StatsPeriod,
+}
+
+pub(crate) async fn run(opts: Options, config: ProverCLIConfig) -> anyhow::Result<()> {
+    let prover_connection_pool = ConnectionPool::<Prover>::singleton(config.db_url)
+        .build()
+        .await
+        .context("failed to build a prover_connection_pool")?;
+    let mut conn = prover_connection_pool
+        .connection()
+        .await
+        .context("failed to get connection from pool")?;
+
+    let start_date = match opts.period {
+        StatsPeriod::Day => chrono::offset::Local::now().date_naive(),
+        StatsPeriod::Week => {
+            (chrono::offset::Local::now() - chrono::Duration::days(7)).date_naive()
+        }
+    };
+    let start_date =
+        start_date.and_time(NaiveTime::from_num_seconds_from_midnight_opt(0, 0).unwrap());
+    let proof_generation_times = conn
+        .fri_witness_generator_dal()
+        .get_proof_generation_times_for_time_frame(start_date)
+        .await?;
+    display_proof_generation_time(proof_generation_times);
+    Ok(())
+}
+
+fn display_proof_generation_time(proof_generation_times: Vec<ProofGenerationTime>) {
+    println!("Batch\tTime Taken\t\tCreated At");
+    for proof_generation_time in proof_generation_times {
+        println!(
+            "{}\t{:?}\t\t{}",
+            proof_generation_time.l1_batch_number,
+            proof_generation_time.time_taken,
+            proof_generation_time.created_at
+        );
+    }
+}
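The window arithmetic in `run` above is the interesting part of the new `stats` command: the query bound is local midnight, either of today (`--period day`) or of the day seven days back (`--period week`), so the first day of the window is always counted in full. A standalone sketch of just that logic; the `window_start` helper is invented for illustration:

```rust
use chrono::{Duration, NaiveDateTime, NaiveTime};

// Returns local midnight of today, or of the day seven days ago.
fn window_start(week: bool) -> NaiveDateTime {
    let date = if week {
        (chrono::offset::Local::now() - Duration::days(7)).date_naive()
    } else {
        chrono::offset::Local::now().date_naive()
    };
    // Truncate to 00:00:00 so the whole first day falls inside the window.
    date.and_time(NaiveTime::from_num_seconds_from_midnight_opt(0, 0).unwrap())
}

fn main() {
    println!("day window starts at  {}", window_start(false));
    println!("week window starts at {}", window_start(true));
}
```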
diff --git a/prover/prover_cli/src/commands/status/batch.rs b/prover/crates/bin/prover_cli/src/commands/status/batch.rs
similarity index 99%
rename from prover/prover_cli/src/commands/status/batch.rs
rename to prover/crates/bin/prover_cli/src/commands/status/batch.rs
index dc23bf046683..84a8e7184a65 100644
--- a/prover/prover_cli/src/commands/status/batch.rs
+++ b/prover/crates/bin/prover_cli/src/commands/status/batch.rs
@@ -4,7 +4,7 @@ use anyhow::Context as _;
 use circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType;
 use clap::Args as ClapArgs;
 use colored::*;
-use prover_dal::{Connection, ConnectionPool, Prover, ProverDal};
+use zksync_prover_dal::{Connection, ConnectionPool, Prover, ProverDal};
 use zksync_types::{
     basic_fri_types::AggregationRound,
     prover_dal::{
diff --git a/prover/prover_cli/src/commands/status/l1.rs b/prover/crates/bin/prover_cli/src/commands/status/l1.rs
similarity index 99%
rename from prover/prover_cli/src/commands/status/l1.rs
rename to prover/crates/bin/prover_cli/src/commands/status/l1.rs
index d02e545a4178..128005c309c6 100644
--- a/prover/prover_cli/src/commands/status/l1.rs
+++ b/prover/crates/bin/prover_cli/src/commands/status/l1.rs
@@ -1,5 +1,4 @@
 use anyhow::Context;
-use prover_dal::{Prover, ProverDal};
 use zksync_basic_types::{
     protocol_version::{L1VerifierConfig, VerifierParams},
     L1BatchNumber, H256, U256,
@@ -14,6 +13,7 @@ use zksync_eth_client::{
     clients::{Client, L1},
     CallFunctionArgs,
 };
+use zksync_prover_dal::{Prover, ProverDal};

 use crate::helper;
diff --git a/prover/prover_cli/src/commands/status/mod.rs b/prover/crates/bin/prover_cli/src/commands/status/mod.rs
similarity index 100%
rename from prover/prover_cli/src/commands/status/mod.rs
rename to prover/crates/bin/prover_cli/src/commands/status/mod.rs
diff --git a/prover/prover_cli/src/commands/status/utils.rs b/prover/crates/bin/prover_cli/src/commands/status/utils.rs
similarity index 100%
rename from prover/prover_cli/src/commands/status/utils.rs
rename to prover/crates/bin/prover_cli/src/commands/status/utils.rs
diff --git a/prover/prover_cli/src/config/mod.rs b/prover/crates/bin/prover_cli/src/config/mod.rs
similarity index 100%
rename from prover/prover_cli/src/config/mod.rs
rename to prover/crates/bin/prover_cli/src/config/mod.rs
diff --git a/prover/prover_cli/src/examples/pliconfig b/prover/crates/bin/prover_cli/src/examples/pliconfig
similarity index 100%
rename from prover/prover_cli/src/examples/pliconfig
rename to prover/crates/bin/prover_cli/src/examples/pliconfig
diff --git a/prover/prover_cli/src/helper.rs b/prover/crates/bin/prover_cli/src/helper.rs
similarity index 100% rename from prover/prover_cli/src/helper.rs rename to prover/crates/bin/prover_cli/src/helper.rs diff --git a/prover/prover_cli/src/lib.rs b/prover/crates/bin/prover_cli/src/lib.rs similarity index 100% rename from prover/prover_cli/src/lib.rs rename to prover/crates/bin/prover_cli/src/lib.rs diff --git a/prover/prover_cli/src/main.rs b/prover/crates/bin/prover_cli/src/main.rs similarity index 100% rename from prover/prover_cli/src/main.rs rename to prover/crates/bin/prover_cli/src/main.rs diff --git a/prover/prover_fri/Cargo.toml b/prover/crates/bin/prover_fri/Cargo.toml similarity index 89% rename from prover/prover_fri/Cargo.toml rename to prover/crates/bin/prover_fri/Cargo.toml index 5b618c928ed4..4f343e8c4e91 100644 --- a/prover/prover_fri/Cargo.toml +++ b/prover/crates/bin/prover_fri/Cargo.toml @@ -12,14 +12,14 @@ categories.workspace = true [dependencies] vise.workspace = true zksync_types.workspace = true -prover_dal.workspace = true +zksync_prover_dal.workspace = true zksync_config.workspace = true zksync_env_config.workspace = true -prometheus_exporter.workspace = true -vlog.workspace = true +zksync_vlog.workspace = true zksync_object_store.workspace = true zksync_queued_job_processor.workspace = true zksync_prover_fri_utils.workspace = true +zksync_core_leftovers.workspace = true zksync_prover_fri_types.workspace = true zksync_utils.workspace = true vk_setup_data_generator_server_fri.workspace = true @@ -41,6 +41,7 @@ async-trait.workspace = true local-ip-address.workspace = true reqwest = { workspace = true, features = ["blocking"] } regex.workspace = true +clap = { workspace = true, features = ["derive"] } [features] default = [] diff --git a/prover/prover_fri/README.md b/prover/crates/bin/prover_fri/README.md similarity index 97% rename from prover/prover_fri/README.md rename to prover/crates/bin/prover_fri/README.md index 5f0a26cfdd49..141b058172f7 100644 --- a/prover/prover_fri/README.md +++ b/prover/crates/bin/prover_fri/README.md @@ -55,7 +55,7 @@ installation as a pre-requisite, alongside these machine specs: 2. Run the server. In the root of the repository: ```console - zk server --components=api,eth,tree,state_keeper,housekeeper,commitment_generator,proof_data_handler + zk server --components=api,eth,tree,state_keeper,housekeeper,commitment_generator,proof_data_handler,vm_runner_bwip ``` Note that it will produce a first l1 batch that can be proven (should be batch 0). @@ -176,8 +176,8 @@ There is an option to run compressors with the GPU, which will significantly imp 2. Install and compile `era-bellman-cuda` library ```console - git clone https://github.com/matter-labs/bellman-cuda.git --branch dev bellman-cuda - cmake -Bbellman-cuda/build -Sbellman-cuda/ -DCMAKE_BUILD_TYPE=Release + git clone https://github.com/matter-labs/era-bellman-cuda + cmake -Bera-bellman-cuda/build -Sera-bellman-cuda/ -DCMAKE_BUILD_TYPE=Release cmake --build bellman-cuda/build/ ``` @@ -202,7 +202,7 @@ There is an option to run compressors with the GPU, which will significantly imp 6. 
Run the compressor using:

   ```console
-   zk f cargo run ---features "gpu" --release --bin zksync_proof_fri_compressor
+   zk f cargo run --features "gpu" --release --bin zksync_proof_fri_compressor
   ```

## Checking the status of the prover
diff --git a/prover/prover_fri/src/gpu_prover_availability_checker.rs b/prover/crates/bin/prover_fri/src/gpu_prover_availability_checker.rs
similarity index 94%
rename from prover/prover_fri/src/gpu_prover_availability_checker.rs
rename to prover/crates/bin/prover_fri/src/gpu_prover_availability_checker.rs
index 027c7a4b07a8..6e154ba553a9 100644
--- a/prover/prover_fri/src/gpu_prover_availability_checker.rs
+++ b/prover/crates/bin/prover_fri/src/gpu_prover_availability_checker.rs
@@ -2,8 +2,9 @@
 pub mod availability_checker {
     use std::{sync::Arc, time::Duration};

-    use prover_dal::{ConnectionPool, Prover, ProverDal};
     use tokio::sync::Notify;
+    use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
+    use zksync_prover_fri_utils::region_fetcher::Zone;
     use zksync_types::prover_dal::{GpuProverInstanceStatus, SocketAddress};

     use crate::metrics::{KillingReason, METRICS};
@@ -12,7 +13,7 @@
     /// If the prover instance is not found in the database or marked as dead, the availability checker will shut down the prover.
     pub struct AvailabilityChecker {
         address: SocketAddress,
-        zone: String,
+        zone: Zone,
         polling_interval: Duration,
         pool: ConnectionPool<Prover>,
     }
@@ -20,7 +21,7 @@
     impl AvailabilityChecker {
         pub fn new(
             address: SocketAddress,
-            zone: String,
+            zone: Zone,
             polling_interval_secs: u32,
             pool: ConnectionPool<Prover>,
         ) -> Self {
@@ -46,7 +47,7 @@
                 .await
                 .unwrap()
                 .fri_gpu_prover_queue_dal()
-                .get_prover_instance_status(self.address.clone(), self.zone.clone())
+                .get_prover_instance_status(self.address.clone(), self.zone.to_string())
                 .await;

             // If the prover instance is not found in the database or marked as dead, we should shut down the prover
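The doc comment above summarizes the whole checker: poll the instance's status at a fixed interval and shut the prover down once its row is missing or marked dead. A minimal sketch of that polling shape; the `is_dead` probe is an invented stand-in for the real `fri_gpu_prover_queue_dal` query:

```rust
use std::time::Duration;

// Stand-in for the DB status query: pretend the instance is marked dead
// after three polls.
async fn is_dead(tick: u32) -> bool {
    tick >= 3
}

#[tokio::main]
async fn main() {
    let mut interval = tokio::time::interval(Duration::from_millis(100));
    let mut tick = 0;
    loop {
        interval.tick().await;
        if is_dead(tick).await {
            println!("instance marked dead; shutting down");
            break;
        }
        tick += 1;
    }
}
```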
diff --git a/prover/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs
similarity index 97%
rename from prover/prover_fri/src/gpu_prover_job_processor.rs
rename to prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs
index 09493627bca4..6148ca3e0aed 100644
--- a/prover/prover_fri/src/gpu_prover_job_processor.rs
+++ b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs
@@ -3,7 +3,6 @@ pub mod gpu_prover {
     use std::{collections::HashMap, sync::Arc, time::Instant};

     use anyhow::Context as _;
-    use prover_dal::{ConnectionPool, ProverDal};
     use shivini::{
         gpu_proof_config::GpuProofConfig, gpu_prove_from_external_witness_data, ProverContext,
     };
@@ -11,6 +10,7 @@
     use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig};
     use zksync_env_config::FromEnv;
     use zksync_object_store::ObjectStore;
+    use zksync_prover_dal::{ConnectionPool, ProverDal};
     use zksync_prover_fri_types::{
         circuit_definitions::{
             base_layer_proof_config,
@@ -28,6 +28,7 @@
         },
         CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts,
     };
+    use zksync_prover_fri_utils::region_fetcher::Zone;
     use zksync_queued_job_processor::{async_trait, JobProcessor};
     use zksync_types::{
         basic_fri_types::CircuitIdRoundTuple, protocol_version::ProtocolSemanticVersion,
@@ -56,7 +57,7 @@
         blob_store: Arc<dyn ObjectStore>,
         public_blob_store: Option<Arc<dyn ObjectStore>>,
         config: Arc<FriProverConfig>,
-        prover_connection_pool: ConnectionPool,
+        prover_connection_pool: ConnectionPool,
         setup_load_mode: SetupLoadMode,
         // Only pick jobs for the configured circuit id and aggregation rounds.
         // Empty means all jobs are picked.
@@ -64,7 +65,7 @@
         witness_vector_queue: SharedWitnessVectorQueue,
         prover_context: ProverContext,
         address: SocketAddress,
-        zone: String,
+        zone: Zone,
         protocol_version: ProtocolSemanticVersion,
     }

@@ -74,12 +75,12 @@ impl Prover {
         pub fn new(
             blob_store: Arc<dyn ObjectStore>,
             public_blob_store: Option<Arc<dyn ObjectStore>>,
             config: FriProverConfig,
-            prover_connection_pool: ConnectionPool,
+            prover_connection_pool: ConnectionPool,
             setup_load_mode: SetupLoadMode,
             circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>,
             witness_vector_queue: SharedWitnessVectorQueue,
             address: SocketAddress,
-            zone: String,
+            zone: Zone,
             protocol_version: ProtocolSemanticVersion,
         ) -> Self {
             Prover {
@@ -230,7 +231,7 @@
                 .fri_gpu_prover_queue_dal()
                 .update_prover_instance_from_full_to_available(
                     self.address.clone(),
-                    self.zone.clone(),
+                    self.zone.to_string(),
                 )
                 .await;
         }
diff --git a/prover/prover_fri/src/lib.rs b/prover/crates/bin/prover_fri/src/lib.rs
similarity index 56%
rename from prover/prover_fri/src/lib.rs
rename to prover/crates/bin/prover_fri/src/lib.rs
index 8d57083ebd36..39757795d980 100644
--- a/prover/prover_fri/src/lib.rs
+++ b/prover/crates/bin/prover_fri/src/lib.rs
@@ -1,3 +1,4 @@
+#![allow(incomplete_features)] // We have to use generic const exprs.
 #![feature(generic_const_exprs)]
 mod metrics;
 pub mod prover_job_processor;
diff --git a/prover/prover_fri/src/main.rs b/prover/crates/bin/prover_fri/src/main.rs
similarity index 79%
rename from prover/prover_fri/src/main.rs
rename to prover/crates/bin/prover_fri/src/main.rs
index 86fd114fa12e..e4b2fd5a6709 100644
--- a/prover/prover_fri/src/main.rs
+++ b/prover/crates/bin/prover_fri/src/main.rs
@@ -1,31 +1,32 @@
+#![allow(incomplete_features)] // We have to use generic const exprs.
 #![feature(generic_const_exprs)]
 use std::{future::Future, sync::Arc, time::Duration};

 use anyhow::Context as _;
+use clap::Parser;
 use local_ip_address::local_ip;
-use prometheus_exporter::PrometheusExporterConfig;
-use prover_dal::{ConnectionPool, Prover, ProverDal};
 use tokio::{
     sync::{oneshot, watch::Receiver, Notify},
     task::JoinHandle,
 };
-use zksync_config::configs::{
-    fri_prover_group::FriProverGroupConfig, DatabaseSecrets, FriProverConfig, ObservabilityConfig,
-};
-use zksync_env_config::{
-    object_store::{ProverObjectStoreConfig, PublicObjectStoreConfig},
-    FromEnv,
-};
+use zksync_config::configs::{DatabaseSecrets, FriProverConfig};
+use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config};
+use zksync_env_config::FromEnv;
 use zksync_object_store::{ObjectStore, ObjectStoreFactory};
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
 use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION;
-use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::get_zone};
+use zksync_prover_fri_utils::{
+    get_all_circuit_id_round_tuples_for,
+    region_fetcher::{RegionFetcher, Zone},
+};
 use zksync_queued_job_processor::JobProcessor;
 use zksync_types::{
     basic_fri_types::CircuitIdRoundTuple,
     prover_dal::{GpuProverInstanceStatus, SocketAddress},
 };
 use zksync_utils::wait_for_tasks::ManagedTasks;
+use zksync_vlog::prometheus::PrometheusExporterConfig;

 mod gpu_prover_availability_checker;
 mod gpu_prover_job_processor;
@@ -34,38 +35,40 @@ mod prover_job_processor;
 mod socket_listener;
 mod utils;

-async fn graceful_shutdown(port: u16) -> anyhow::Result<impl Future<Output = ()>> {
+async fn graceful_shutdown(zone: Zone, port: u16) -> anyhow::Result<impl Future<Output = ()>> {
     let database_secrets = DatabaseSecrets::from_env().context("DatabaseSecrets::from_env()")?;
     let pool = ConnectionPool::<Prover>::singleton(database_secrets.prover_url()?)
         .build()
         .await
         .context("failed to build a connection pool")?;
     let host = local_ip().context("Failed obtaining local IP address")?;
-    let zone_url = &FriProverConfig::from_env()
-        .context("FriProverConfig::from_env()")?
- .zone_read_url; - let zone = get_zone(zone_url).await.context("get_zone()")?; let address = SocketAddress { host, port }; Ok(async move { pool.connection() .await .unwrap() .fri_gpu_prover_queue_dal() - .update_prover_instance_status(address, GpuProverInstanceStatus::Dead, zone) + .update_prover_instance_status(address, GpuProverInstanceStatus::Dead, zone.to_string()) .await }) } #[tokio::main] async fn main() -> anyhow::Result<()> { - let observability_config = - ObservabilityConfig::from_env().context("ObservabilityConfig::from_env()")?; - let log_format: vlog::LogFormat = observability_config + let opt = Cli::parse(); + + let general_config = load_general_config(opt.config_path).context("general config")?; + let database_secrets = load_database_secrets(opt.secrets_path).context("database secrets")?; + + let observability_config = general_config + .observability + .context("observability config")?; + let log_format: zksync_vlog::LogFormat = observability_config .log_format .parse() .context("Invalid log format")?; - let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(log_format); if let Some(sentry_url) = &observability_config.sentry_url { builder = builder .with_sentry_url(sentry_url) @@ -91,7 +94,7 @@ async fn main() -> anyhow::Result<()> { tracing::info!("No sentry URL was provided"); } - let prover_config = FriProverConfig::from_env().context("FriProverConfig::from_env()")?; + let prover_config = general_config.prover_config.context("fri_prover config")?; let exporter_config = PrometheusExporterConfig::pull(prover_config.prometheus_port); let (stop_signal_sender, stop_signal_receiver) = oneshot::channel(); @@ -103,24 +106,36 @@ async fn main() -> anyhow::Result<()> { }) .context("Error setting Ctrl+C handler")?; + let zone = RegionFetcher::new( + prover_config.cloud_type, + prover_config.zone_read_url.clone(), + ) + .get_zone() + .await?; + let (stop_sender, stop_receiver) = tokio::sync::watch::channel(false); - let object_store_config = - ProverObjectStoreConfig::from_env().context("ProverObjectStoreConfig::from_env()")?; - let object_store_factory = ObjectStoreFactory::new(object_store_config.0); - let public_object_store_config = - PublicObjectStoreConfig::from_env().context("PublicObjectStoreConfig::from_env()")?; + let prover_object_store_config = prover_config + .prover_object_store + .clone() + .context("prover object store config")?; + let object_store_factory = ObjectStoreFactory::new(prover_object_store_config); + let public_object_store_config = prover_config + .public_object_store + .clone() + .context("public object store config")?; let public_blob_store = match prover_config.shall_save_to_public_bucket { false => None, true => Some( - ObjectStoreFactory::new(public_object_store_config.0) + ObjectStoreFactory::new(public_object_store_config) .create_store() .await?, ), }; let specialized_group_id = prover_config.specialized_group_id; - let circuit_ids_for_round_to_be_proven = FriProverGroupConfig::from_env() - .context("FriProverGroupConfig::from_env()")? + let circuit_ids_for_round_to_be_proven = general_config + .prover_group_config + .context("prover group config")? 
.get_circuit_ids_for_group_id(specialized_group_id) .unwrap_or_default(); let circuit_ids_for_round_to_be_proven = @@ -131,7 +146,6 @@ async fn main() -> anyhow::Result<()> { specialized_group_id, circuit_ids_for_round_to_be_proven.clone() ); - let database_secrets = DatabaseSecrets::from_env().context("DatabaseSecrets")?; // There are 2 threads using the connection pool: // 1. The prover thread, which is used to update the prover job status. @@ -148,6 +162,7 @@ async fn main() -> anyhow::Result<()> { let prover_tasks = get_prover_tasks( prover_config, + zone.clone(), stop_receiver.clone(), object_store_factory, public_blob_store, @@ -166,7 +181,7 @@ async fn main() -> anyhow::Result<()> { tokio::select! { _ = tasks.wait_single() => { if cfg!(feature = "gpu") { - graceful_shutdown(port) + graceful_shutdown(zone, port) .await .context("failed to prepare graceful shutdown future")? .await; @@ -186,6 +201,7 @@ async fn main() -> anyhow::Result<()> { #[cfg(not(feature = "gpu"))] async fn get_prover_tasks( prover_config: FriProverConfig, + _zone: Zone, stop_receiver: Receiver, store_factory: ObjectStoreFactory, public_blob_store: Option>, @@ -220,6 +236,7 @@ async fn get_prover_tasks( #[cfg(feature = "gpu")] async fn get_prover_tasks( prover_config: FriProverConfig, + zone: Zone, stop_receiver: Receiver, store_factory: ObjectStoreFactory, public_blob_store: Option>, @@ -238,9 +255,6 @@ async fn get_prover_tasks( let shared_witness_vector_queue = Arc::new(Mutex::new(witness_vector_queue)); let consumer = shared_witness_vector_queue.clone(); - let zone = get_zone(&prover_config.zone_read_url) - .await - .context("get_zone()")?; let local_ip = local_ip().context("Failed obtaining local IP address")?; let address = SocketAddress { host: local_ip, @@ -302,3 +316,12 @@ async fn get_prover_tasks( Ok(tasks) } + +#[derive(Debug, Parser)] +#[command(author = "Matter Labs", version)] +pub(crate) struct Cli { + #[arg(long)] + pub(crate) config_path: Option, + #[arg(long)] + pub(crate) secrets_path: Option, +} diff --git a/prover/prover_fri/src/metrics.rs b/prover/crates/bin/prover_fri/src/metrics.rs similarity index 100% rename from prover/prover_fri/src/metrics.rs rename to prover/crates/bin/prover_fri/src/metrics.rs diff --git a/prover/prover_fri/src/prover_job_processor.rs b/prover/crates/bin/prover_fri/src/prover_job_processor.rs similarity index 98% rename from prover/prover_fri/src/prover_job_processor.rs rename to prover/crates/bin/prover_fri/src/prover_job_processor.rs index 8cdfc91247fa..f06f1bbab939 100644 --- a/prover/prover_fri/src/prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/prover_job_processor.rs @@ -1,12 +1,12 @@ use std::{collections::HashMap, sync::Arc, time::Instant}; use anyhow::Context as _; -use prover_dal::{ConnectionPool, ProverDal}; use tokio::task::JoinHandle; use zkevm_test_harness::prover_utils::{prove_base_layer_circuit, prove_recursion_layer_circuit}; use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig}; use zksync_env_config::FromEnv; use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ base_layer_proof_config, @@ -43,7 +43,7 @@ pub struct Prover { blob_store: Arc, public_blob_store: Option>, config: Arc, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, setup_load_mode: SetupLoadMode, // Only pick jobs for the configured circuit id and aggregation rounds. // Empty means all jobs are picked. 
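The comment carried over in the hunk above states the filter convention: an empty `circuit_ids_for_round_to_be_proven` list means every job is picked. A self-contained sketch of that rule (the struct here is a hypothetical stand-in for `zksync_types::basic_fri_types::CircuitIdRoundTuple`, not the real type):

```rust
/// Hypothetical stand-in for `CircuitIdRoundTuple`, just for this sketch.
#[derive(PartialEq)]
struct CircuitIdRoundTuple {
    circuit_id: u8,
    aggregation_round: u8,
}

/// An empty filter selects every job; otherwise the job's
/// (circuit id, aggregation round) pair must appear explicitly.
fn job_is_eligible(filter: &[CircuitIdRoundTuple], job: &CircuitIdRoundTuple) -> bool {
    filter.is_empty() || filter.contains(job)
}

fn main() {
    let job = CircuitIdRoundTuple { circuit_id: 1, aggregation_round: 0 };
    assert!(job_is_eligible(&[], &job)); // empty filter picks all jobs
    let filter = [CircuitIdRoundTuple { circuit_id: 2, aggregation_round: 0 }];
    assert!(!job_is_eligible(&filter, &job));
}
```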
@@ -57,7 +57,7 @@ impl Prover { blob_store: Arc, public_blob_store: Option>, config: FriProverConfig, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, setup_load_mode: SetupLoadMode, circuit_ids_for_round_to_be_proven: Vec, protocol_version: ProtocolSemanticVersion, diff --git a/prover/prover_fri/src/socket_listener.rs b/prover/crates/bin/prover_fri/src/socket_listener.rs similarity index 94% rename from prover/prover_fri/src/socket_listener.rs rename to prover/crates/bin/prover_fri/src/socket_listener.rs index e034b1fd9276..e65471409e1e 100644 --- a/prover/prover_fri/src/socket_listener.rs +++ b/prover/crates/bin/prover_fri/src/socket_listener.rs @@ -3,14 +3,15 @@ pub mod gpu_socket_listener { use std::{net::SocketAddr, sync::Arc, time::Instant}; use anyhow::Context as _; - use prover_dal::{ConnectionPool, Prover, ProverDal}; use tokio::{ io::copy, net::{TcpListener, TcpStream}, sync::{watch, Notify}, }; use zksync_object_store::bincode; + use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::WitnessVectorArtifacts; + use zksync_prover_fri_utils::region_fetcher::Zone; use zksync_types::{ protocol_version::ProtocolSemanticVersion, prover_dal::{GpuProverInstanceStatus, SocketAddress}, @@ -26,7 +27,7 @@ pub mod gpu_socket_listener { queue: SharedWitnessVectorQueue, pool: ConnectionPool, specialized_prover_group_id: u8, - zone: String, + zone: Zone, protocol_version: ProtocolSemanticVersion, } @@ -36,7 +37,7 @@ pub mod gpu_socket_listener { queue: SharedWitnessVectorQueue, pool: ConnectionPool, specialized_prover_group_id: u8, - zone: String, + zone: Zone, protocol_version: ProtocolSemanticVersion, ) -> Self { Self { @@ -68,7 +69,7 @@ pub mod gpu_socket_listener { .insert_prover_instance( self.address.clone(), self.specialized_prover_group_id, - self.zone.clone(), + self.zone.to_string(), self.protocol_version, ) .await; @@ -85,7 +86,7 @@ pub mod gpu_socket_listener { let mut now = Instant::now(); loop { if *stop_receiver.borrow() { - tracing::warn!("Stop signal received, shutting down socket listener"); + tracing::info!("Stop signal received, shutting down socket listener"); return Ok(()); } let stream = listener @@ -154,7 +155,7 @@ pub mod gpu_socket_listener { .await .unwrap() .fri_gpu_prover_queue_dal() - .update_prover_instance_status(self.address.clone(), status, self.zone.clone()) + .update_prover_instance_status(self.address.clone(), status, self.zone.to_string()) .await; tracing::info!( "Marked prover as {:?} after {:?}", diff --git a/prover/prover_fri/src/utils.rs b/prover/crates/bin/prover_fri/src/utils.rs similarity index 99% rename from prover/prover_fri/src/utils.rs rename to prover/crates/bin/prover_fri/src/utils.rs index e52b66ed983f..15a2a6c18bb2 100644 --- a/prover/prover_fri/src/utils.rs +++ b/prover/crates/bin/prover_fri/src/utils.rs @@ -2,10 +2,10 @@ use std::{sync::Arc, time::Instant}; -use prover_dal::{Connection, Prover, ProverDal}; use tokio::sync::Mutex; use zkevm_test_harness::prover_utils::{verify_base_layer_proof, verify_recursion_layer_proof}; use zksync_object_store::ObjectStore; +use zksync_prover_dal::{Connection, Prover, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::{ diff --git a/prover/prover_fri/tests/basic_test.rs b/prover/crates/bin/prover_fri/tests/basic_test.rs similarity index 100% rename from prover/prover_fri/tests/basic_test.rs rename to prover/crates/bin/prover_fri/tests/basic_test.rs diff --git 
a/prover/prover_fri/tests/data/proofs_fri/proof_1293714.bin b/prover/crates/bin/prover_fri/tests/data/proofs_fri/proof_1293714.bin similarity index 100% rename from prover/prover_fri/tests/data/proofs_fri/proof_1293714.bin rename to prover/crates/bin/prover_fri/tests/data/proofs_fri/proof_1293714.bin diff --git a/prover/prover_fri/tests/data/proofs_fri/proof_5176866.bin b/prover/crates/bin/prover_fri/tests/data/proofs_fri/proof_5176866.bin similarity index 100% rename from prover/prover_fri/tests/data/proofs_fri/proof_5176866.bin rename to prover/crates/bin/prover_fri/tests/data/proofs_fri/proof_5176866.bin diff --git a/prover/prover_fri/tests/data/prover_jobs_fri/114499_479_6_BasicCircuits_0.bin b/prover/crates/bin/prover_fri/tests/data/prover_jobs_fri/114499_479_6_BasicCircuits_0.bin similarity index 100% rename from prover/prover_fri/tests/data/prover_jobs_fri/114499_479_6_BasicCircuits_0.bin rename to prover/crates/bin/prover_fri/tests/data/prover_jobs_fri/114499_479_6_BasicCircuits_0.bin diff --git a/prover/prover_fri/tests/data/prover_jobs_fri/128623_1086_1_BasicCircuits_0.bin b/prover/crates/bin/prover_fri/tests/data/prover_jobs_fri/128623_1086_1_BasicCircuits_0.bin similarity index 100% rename from prover/prover_fri/tests/data/prover_jobs_fri/128623_1086_1_BasicCircuits_0.bin rename to prover/crates/bin/prover_fri/tests/data/prover_jobs_fri/128623_1086_1_BasicCircuits_0.bin diff --git a/prover/prover_fri_gateway/Cargo.toml b/prover/crates/bin/prover_fri_gateway/Cargo.toml similarity index 88% rename from prover/prover_fri_gateway/Cargo.toml rename to prover/crates/bin/prover_fri_gateway/Cargo.toml index 6a98bd8f0067..6dd54d5d677d 100644 --- a/prover/prover_fri_gateway/Cargo.toml +++ b/prover/crates/bin/prover_fri_gateway/Cargo.toml @@ -12,15 +12,14 @@ categories.workspace = true [dependencies] vise.workspace = true zksync_types.workspace = true -prover_dal.workspace = true +zksync_prover_dal.workspace = true zksync_config.workspace = true zksync_env_config.workspace = true +zksync_core_leftovers.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true -zksync_prover_config.workspace = true zksync_utils.workspace = true -prometheus_exporter.workspace = true -vlog.workspace = true +zksync_vlog.workspace = true anyhow.workspace = true tracing.workspace = true diff --git a/prover/prover_fri_gateway/README.md b/prover/crates/bin/prover_fri_gateway/README.md similarity index 100% rename from prover/prover_fri_gateway/README.md rename to prover/crates/bin/prover_fri_gateway/README.md diff --git a/prover/crates/bin/prover_fri_gateway/src/client.rs b/prover/crates/bin/prover_fri_gateway/src/client.rs new file mode 100644 index 000000000000..5f1ad79ef36f --- /dev/null +++ b/prover/crates/bin/prover_fri_gateway/src/client.rs @@ -0,0 +1,51 @@ +use std::sync::Arc; + +use serde::{de::DeserializeOwned, Serialize}; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover}; + +/// A tiny wrapper over the reqwest client that also stores +/// the objects commonly needed when interacting with prover API. 
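The doc comment above introduces `ProverApiClient`, whose request helper is a plain POST-JSON round trip. A self-contained sketch of that round trip, assuming the helper's elided generics are `<Req, Resp>` with `Resp: DeserializeOwned` and a `reqwest::Result<Resp>` return type:

```rust
use serde::{de::DeserializeOwned, Serialize};

/// Sketch of the round trip: POST a JSON body and decode a JSON response,
/// turning 4xx/5xx statuses into errors before attempting to decode.
/// (Requires reqwest built with the `json` feature.)
async fn post_json<Req, Resp>(
    client: &reqwest::Client,
    endpoint: &str,
    request: &Req,
) -> reqwest::Result<Resp>
where
    Req: Serialize,
    Resp: DeserializeOwned,
{
    client
        .post(endpoint)
        .json(request)
        .send()
        .await?
        .error_for_status()?
        .json::<Resp>()
        .await
}
```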
+#[derive(Debug)] +pub(crate) struct ProverApiClient { + pub(crate) blob_store: Arc, + pub(crate) pool: ConnectionPool, + pub(crate) api_url: String, + pub(crate) client: reqwest::Client, +} + +impl ProverApiClient { + pub(crate) fn new( + blob_store: Arc, + pool: ConnectionPool, + api_url: String, + ) -> Self { + Self { + blob_store, + pool, + api_url, + client: reqwest::Client::new(), + } + } + + pub(crate) async fn send_http_request( + &self, + request: Req, + endpoint: &str, + ) -> Result + where + Req: Serialize, + Resp: DeserializeOwned, + { + tracing::info!("Sending request to {}", endpoint); + + self.client + .post(endpoint) + .json(&request) + .send() + .await? + .error_for_status()? + .json::() + .await + } +} diff --git a/prover/prover_fri_gateway/src/main.rs b/prover/crates/bin/prover_fri_gateway/src/main.rs similarity index 69% rename from prover/prover_fri_gateway/src/main.rs rename to prover/crates/bin/prover_fri_gateway/src/main.rs index f7e7af763afb..c204fb7395f2 100644 --- a/prover/prover_fri_gateway/src/main.rs +++ b/prover/crates/bin/prover_fri_gateway/src/main.rs @@ -2,22 +2,22 @@ use std::time::Duration; use anyhow::Context as _; use clap::Parser; -use prometheus_exporter::PrometheusExporterConfig; -use prover_dal::{ConnectionPool, Prover}; -use reqwest::Client; +use proof_gen_data_fetcher::ProofGenDataFetcher; +use proof_submitter::ProofSubmitter; use tokio::sync::{oneshot, watch}; +use traits::PeriodicApi as _; +use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config}; use zksync_env_config::object_store::ProverObjectStoreConfig; use zksync_object_store::ObjectStoreFactory; -use zksync_prover_config::{load_database_secrets, load_general_config}; -use zksync_prover_interface::api::{ProofGenerationDataRequest, SubmitProofRequest}; +use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_utils::wait_for_tasks::ManagedTasks; +use zksync_vlog::prometheus::PrometheusExporterConfig; -use crate::api_data_fetcher::{PeriodicApiStruct, PROOF_GENERATION_DATA_PATH, SUBMIT_PROOF_PATH}; - -mod api_data_fetcher; +mod client; mod metrics; mod proof_gen_data_fetcher; mod proof_submitter; +mod traits; #[tokio::main] async fn main() -> anyhow::Result<()> { @@ -30,12 +30,12 @@ async fn main() -> anyhow::Result<()> { .observability .context("observability config")?; - let log_format: vlog::LogFormat = observability_config + let log_format: zksync_vlog::LogFormat = observability_config .log_format .parse() .context("Invalid log format")?; - let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(log_format); if let Some(sentry_url) = &observability_config.sentry_url { builder = builder .with_sentry_url(sentry_url) @@ -65,20 +65,16 @@ async fn main() -> anyhow::Result<()> { ); let store_factory = ObjectStoreFactory::new(object_store_config.0); - let proof_submitter = PeriodicApiStruct { - blob_store: store_factory.create_store().await?, - pool: pool.clone(), - api_url: format!("{}{SUBMIT_PROOF_PATH}", config.api_url), - poll_duration: config.api_poll_duration(), - client: Client::new(), - }; - let proof_gen_data_fetcher = PeriodicApiStruct { - blob_store: store_factory.create_store().await?, + let proof_submitter = ProofSubmitter::new( + store_factory.create_store().await?, + config.api_url.clone(), + pool.clone(), + ); + let proof_gen_data_fetcher = ProofGenDataFetcher::new( + store_factory.create_store().await?, + config.api_url.clone(), pool, - 
api_url: format!("{}{PROOF_GENERATION_DATA_PATH}", config.api_url), - poll_duration: config.api_poll_duration(), - client: Client::new(), - }; + ); let (stop_sender, stop_receiver) = watch::channel(false); @@ -98,10 +94,8 @@ async fn main() -> anyhow::Result<()> { PrometheusExporterConfig::pull(config.prometheus_listener_port) .run(stop_receiver.clone()), ), - tokio::spawn( - proof_gen_data_fetcher.run::(stop_receiver.clone()), - ), - tokio::spawn(proof_submitter.run::(stop_receiver)), + tokio::spawn(proof_gen_data_fetcher.run(config.api_poll_duration(), stop_receiver.clone())), + tokio::spawn(proof_submitter.run(config.api_poll_duration(), stop_receiver)), ]; let mut tasks = ManagedTasks::new(tasks); diff --git a/prover/prover_fri_gateway/src/metrics.rs b/prover/crates/bin/prover_fri_gateway/src/metrics.rs similarity index 100% rename from prover/prover_fri_gateway/src/metrics.rs rename to prover/crates/bin/prover_fri_gateway/src/metrics.rs diff --git a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs b/prover/crates/bin/prover_fri_gateway/src/proof_gen_data_fetcher.rs similarity index 54% rename from prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs rename to prover/crates/bin/prover_fri_gateway/src/proof_gen_data_fetcher.rs index 3973ff0eea1d..809df8ae8225 100644 --- a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs +++ b/prover/crates/bin/prover_fri_gateway/src/proof_gen_data_fetcher.rs @@ -1,38 +1,59 @@ +use std::sync::Arc; + use async_trait::async_trait; -use prover_dal::ProverDal; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_interface::api::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, }; -use crate::api_data_fetcher::{PeriodicApi, PeriodicApiStruct}; +use crate::{client::ProverApiClient, traits::PeriodicApi}; + +/// Poller structure that will periodically check the prover API for new proof generation data. +/// Fetched data is stored to the database/object store for further processing. +#[derive(Debug)] +pub struct ProofGenDataFetcher(ProverApiClient); + +/// The path to the API endpoint that returns the next proof generation data. 
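`ProofGenDataFetcher` wraps `ProverApiClient` as a newtype: the constructor joins the base URL with the endpoint path, and every field access afterwards goes through `.0`. A tiny sketch of that shape (types and URL are illustrative, not the real gateway ones):

```rust
/// Illustrative stand-in; the real client lives in the gateway crate.
struct ApiClient {
    api_url: String,
}

/// Newtype poller in the style of `ProofGenDataFetcher(ProverApiClient)`.
struct Fetcher(ApiClient);

impl Fetcher {
    /// Mirrors the `format!("{base_url}{PROOF_GENERATION_DATA_PATH}")` join below.
    fn new(base_url: &str) -> Self {
        Fetcher(ApiClient {
            api_url: format!("{base_url}/proof_generation_data"),
        })
    }
}

fn main() {
    // Hypothetical gateway address, for illustration only.
    let fetcher = Fetcher::new("http://127.0.0.1:3320");
    assert_eq!(fetcher.0.api_url, "http://127.0.0.1:3320/proof_generation_data");
}
```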
+const PROOF_GENERATION_DATA_PATH: &str = "/proof_generation_data"; + +impl ProofGenDataFetcher { + pub(crate) fn new( + blob_store: Arc, + base_url: String, + pool: ConnectionPool, + ) -> Self { + let api_url = format!("{base_url}{PROOF_GENERATION_DATA_PATH}"); + let inner = ProverApiClient::new(blob_store, pool, api_url); + Self(inner) + } +} -impl PeriodicApiStruct { +impl ProofGenDataFetcher { async fn save_proof_gen_data(&self, data: ProofGenerationData) { - let store = &*self.blob_store; - let blob_url = store - .put(data.l1_batch_number, &data.data) + let store = &*self.0.blob_store; + let witness_inputs = store + .put(data.l1_batch_number, &data.witness_input_data) .await .expect("Failed to save proof generation data to GCS"); - let mut connection = self.pool.connection().await.unwrap(); + let mut connection = self.0.pool.connection().await.unwrap(); + connection .fri_protocol_versions_dal() .save_prover_protocol_version(data.protocol_version, data.l1_verifier_config) .await; + connection .fri_witness_generator_dal() - .save_witness_inputs( - data.l1_batch_number, - &blob_url, - data.protocol_version, - data.eip_4844_blobs, - ) + .save_witness_inputs(data.l1_batch_number, &witness_inputs, data.protocol_version) .await; } } #[async_trait] -impl PeriodicApi for PeriodicApiStruct { +impl PeriodicApi for ProofGenDataFetcher { type JobId = (); + type Request = ProofGenerationDataRequest; type Response = ProofGenerationDataResponse; const SERVICE_NAME: &'static str = "ProofGenDataFetcher"; @@ -46,7 +67,7 @@ impl PeriodicApi for PeriodicApiStruct { _: (), request: ProofGenerationDataRequest, ) -> reqwest::Result { - self.send_http_request(request, &self.api_url).await + self.0.send_http_request(request, &self.0.api_url).await } async fn handle_response(&self, _: (), response: Self::Response) { diff --git a/prover/prover_fri_gateway/src/proof_submitter.rs b/prover/crates/bin/prover_fri_gateway/src/proof_submitter.rs similarity index 66% rename from prover/prover_fri_gateway/src/proof_submitter.rs rename to prover/crates/bin/prover_fri_gateway/src/proof_submitter.rs index 6ed7b6d5c1db..2a74781b59dd 100644 --- a/prover/prover_fri_gateway/src/proof_submitter.rs +++ b/prover/crates/bin/prover_fri_gateway/src/proof_submitter.rs @@ -1,13 +1,37 @@ +use std::sync::Arc; + use async_trait::async_trait; -use prover_dal::ProverDal; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_interface::api::{SubmitProofRequest, SubmitProofResponse}; use zksync_types::{prover_dal::ProofCompressionJobStatus, L1BatchNumber}; -use crate::api_data_fetcher::{PeriodicApi, PeriodicApiStruct}; +use crate::{client::ProverApiClient, traits::PeriodicApi}; + +/// The path to the API endpoint that submits the proof. +const SUBMIT_PROOF_PATH: &str = "/submit_proof"; -impl PeriodicApiStruct { +/// Poller structure that will periodically check the database for new proofs to submit. +/// Once a new proof is detected, it will be sent to the prover API. 
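Per the doc comment above, the submitter polls the database and maps the compression job's status to a request, as the next hunk shows for the successful case. A simplified, self-contained mirror of that dispatch (both enums are stand-ins, and the `Skipped` arm is an assumption, since the hunk elides the non-successful statuses):

```rust
/// Stand-ins for the real prover-interface types, simplified for this sketch.
enum CompressionStatus {
    Successful(Vec<u8>), // serialized, compressed proof blob
    Skipped,
}

enum SubmitProofRequest {
    Proof(Vec<u8>),
    SkippedProofGeneration, // assumed variant for batches with no proof
}

/// Mirrors the `match status { ... }` dispatch in `next_submit_proof_request`.
fn to_request(status: CompressionStatus) -> SubmitProofRequest {
    match status {
        CompressionStatus::Successful(proof) => SubmitProofRequest::Proof(proof),
        CompressionStatus::Skipped => SubmitProofRequest::SkippedProofGeneration,
    }
}

fn main() {
    match to_request(CompressionStatus::Skipped) {
        SubmitProofRequest::SkippedProofGeneration => println!("batch skipped, server notified"),
        SubmitProofRequest::Proof(_) => println!("proof submitted"),
    }
}
```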
+#[derive(Debug)]
+pub struct ProofSubmitter(ProverApiClient);
+
+impl ProofSubmitter {
+    pub(crate) fn new(
+        blob_store: Arc<dyn ObjectStore>,
+        base_url: String,
+        pool: ConnectionPool<Prover>,
+    ) -> Self {
+        let api_url = format!("{base_url}{SUBMIT_PROOF_PATH}");
+        let inner = ProverApiClient::new(blob_store, pool, api_url);
+        Self(inner)
+    }
+}
+
+impl ProofSubmitter {
     async fn next_submit_proof_request(&self) -> Option<(L1BatchNumber, SubmitProofRequest)> {
         let (l1_batch_number, protocol_version, status) = self
+            .0
             .pool
             .connection()
             .await
@@ -19,6 +43,7 @@ impl PeriodicApiStruct {
         let request = match status {
             ProofCompressionJobStatus::Successful => {
                 let proof = self
+                    .0
                     .blob_store
                     .get((l1_batch_number, protocol_version))
                     .await
@@ -36,7 +61,8 @@ impl PeriodicApiStruct {
     }

     async fn save_successful_sent_proof(&self, l1_batch_number: L1BatchNumber) {
-        self.pool
+        self.0
+            .pool
             .connection()
             .await
             .unwrap()
@@ -47,8 +73,9 @@ impl PeriodicApiStruct {
 }

 #[async_trait]
-impl PeriodicApi for PeriodicApiStruct {
+impl PeriodicApi for ProofSubmitter {
     type JobId = L1BatchNumber;
+    type Request = SubmitProofRequest;
     type Response = SubmitProofResponse;

     const SERVICE_NAME: &'static str = "ProofSubmitter";
@@ -62,8 +89,8 @@ impl PeriodicApi for PeriodicApiStruct {
         job_id: Self::JobId,
         request: SubmitProofRequest,
     ) -> reqwest::Result<Self::Response> {
-        let endpoint = format!("{}/{job_id}", self.api_url);
-        self.send_http_request(request, &endpoint).await
+        let endpoint = format!("{}/{job_id}", self.0.api_url);
+        self.0.send_http_request(request, &endpoint).await
     }

     async fn handle_response(&self, job_id: L1BatchNumber, response: Self::Response) {
diff --git a/prover/crates/bin/prover_fri_gateway/src/traits.rs b/prover/crates/bin/prover_fri_gateway/src/traits.rs
new file mode 100644
index 000000000000..e54ffe2414ce
--- /dev/null
+++ b/prover/crates/bin/prover_fri_gateway/src/traits.rs
@@ -0,0 +1,62 @@
+use std::time::Duration;
+
+use tokio::sync::watch;
+
+use crate::metrics::METRICS;
+
+/// Trait for fetching data from an API periodically.
+#[async_trait::async_trait]
+pub(crate) trait PeriodicApi: Sync + Send + 'static + Sized {
+    type JobId: Send + Copy;
+    type Request: Send;
+    type Response: Send;
+
+    const SERVICE_NAME: &'static str;
+
+    /// Returns the next request to be sent to the API and the endpoint to send it to.
+    async fn get_next_request(&self) -> Option<(Self::JobId, Self::Request)>;
+
+    /// Sends the request to the API.
+    async fn send_request(
+        &self,
+        job_id: Self::JobId,
+        request: Self::Request,
+    ) -> reqwest::Result<Self::Response>;
+
+    /// Handles the response from the API.
+    async fn handle_response(&self, job_id: Self::JobId, response: Self::Response);
+
+    async fn run(
+        self,
+        poll_duration: Duration,
+        mut stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<()> {
+        tracing::info!(
+            "Starting periodic job: {} with frequency: {:?}",
+            Self::SERVICE_NAME,
+            poll_duration
+        );
+
+        loop {
+            if *stop_receiver.borrow() {
+                tracing::warn!("Stop signal received, shutting down {}", Self::SERVICE_NAME);
+                return Ok(());
+            }
+
+            if let Some((job_id, request)) = self.get_next_request().await {
+                match self.send_request(job_id, request).await {
+                    Ok(response) => {
+                        self.handle_response(job_id, response).await;
+                    }
+                    Err(err) => {
+                        METRICS.http_error[&Self::SERVICE_NAME].inc();
+                        tracing::error!("HTTP request failed due to error: {}", err);
+                    }
+                }
+            }
+            // Exit condition will be checked on the next iteration.
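The loop's final step, continuing in the next hunk lines, uses `tokio::time::timeout` over `stop_receiver.changed()` as an interruptible sleep: a stop signal cuts the poll delay short instead of waiting out the full `poll_duration`. A runnable distillation of just that idiom:

```rust
use std::time::Duration;

use tokio::sync::watch;

/// The sleep idiom from `run` in isolation: wait up to `poll_duration`, but
/// wake immediately if the stop channel changes, so the `*stop_receiver.borrow()`
/// check at the top of the loop runs promptly rather than a poll period later.
async fn interruptible_sleep(poll_duration: Duration, stop_receiver: &mut watch::Receiver<bool>) {
    // Either outcome is fine: `Err(_)` means the timeout elapsed, `Ok(_)` means
    // the watch value changed (or the sender dropped); both return control.
    let _ = tokio::time::timeout(poll_duration, stop_receiver.changed()).await;
}

#[tokio::main]
async fn main() {
    let (stop_sender, mut stop_receiver) = watch::channel(false);
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(50)).await;
        let _ = stop_sender.send(true); // request shutdown; the sleeper wakes early
    });
    // Returns after roughly 50 ms, not 60 s.
    interruptible_sleep(Duration::from_secs(60), &mut stop_receiver).await;
    assert!(*stop_receiver.borrow());
}
```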
+ tokio::time::timeout(poll_duration, stop_receiver.changed()) + .await + .ok(); + } + } +} diff --git a/prover/prover_version/Cargo.toml b/prover/crates/bin/prover_version/Cargo.toml similarity index 100% rename from prover/prover_version/Cargo.toml rename to prover/crates/bin/prover_version/Cargo.toml diff --git a/prover/prover_version/src/main.rs b/prover/crates/bin/prover_version/src/main.rs similarity index 100% rename from prover/prover_version/src/main.rs rename to prover/crates/bin/prover_version/src/main.rs diff --git a/prover/vk_setup_data_generator_server_fri/Cargo.toml b/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml similarity index 97% rename from prover/vk_setup_data_generator_server_fri/Cargo.toml rename to prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml index c1d72cf6ba24..edae9764438f 100644 --- a/prover/vk_setup_data_generator_server_fri/Cargo.toml +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml @@ -20,7 +20,7 @@ path = "src/lib.rs" [dependencies] -vlog.workspace = true +zksync_vlog.workspace = true zksync_types.workspace = true zksync_utils.workspace = true zksync_prover_fri_types.workspace = true diff --git a/prover/vk_setup_data_generator_server_fri/README.md b/prover/crates/bin/vk_setup_data_generator_server_fri/README.md similarity index 100% rename from prover/vk_setup_data_generator_server_fri/README.md rename to prover/crates/bin/vk_setup_data_generator_server_fri/README.md diff --git a/prover/vk_setup_data_generator_server_fri/data/commitments.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/commitments.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/commitments.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/commitments.json diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_10.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_10.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_10.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_10.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_11.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_11.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_11.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_11.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_12.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_12.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_12.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_12.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_13.bin 
b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_13.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_13.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_13.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_14.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_14.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_14.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_14.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_15.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_15.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_15.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_15.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_2.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_2.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_2.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_2.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_255.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_255.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_255.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_255.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_3.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_3.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_3.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_3.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_4.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_4.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_4.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_4.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_5.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_5.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_5.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_5.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_6.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_6.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_6.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_6.bin diff --git 
a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_7.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_7.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_7.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_7.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_8.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_8.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_8.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_8.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_9.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_9.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_9.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_9.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_10.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_10.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_10.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_10.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_11.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_11.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_11.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_11.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_12.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_12.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_12.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_12.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_13.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_13.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_13.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_13.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_14.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_14.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_14.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_14.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_15.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_15.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_15.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_15.bin diff --git 
a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_16.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_16.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_16.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_16.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_17.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_17.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_17.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_17.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_18.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_18.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_18.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_18.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_3.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_3.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_3.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_3.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_4.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_4.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_4.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_4.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_5.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_5.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_5.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_5.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_6.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_6.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_6.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_6.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_7.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_7.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_7.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_7.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_8.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_8.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_8.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_8.bin diff --git 
a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_9.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_9.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_9.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_9.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_node.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_node.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_node.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_node.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_recursion_tip.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_recursion_tip.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_recursion_tip.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_recursion_tip.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_scheduler.bin b/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_scheduler.bin similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/finalization_hints_scheduler.bin rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_scheduler.bin diff --git a/prover/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json rename to 
prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_14_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_14_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_14_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_14_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_15_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_15_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_15_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_15_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_255_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_255_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_255_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_255_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json rename to 
prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json rename to 
prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_16_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_16_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_16_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_16_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_17_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_17_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_17_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_17_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_18_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_18_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_18_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_18_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json similarity index 100% rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json rename to 
prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_node_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/data/verification_node_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_recursion_tip_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_recursion_tip_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/data/verification_recursion_tip_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_recursion_tip_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/0.24.0/commitments.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/commitments.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/0.24.0/commitments.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/commitments.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/0.24.0/snark_verification_scheduler_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/snark_verification_scheduler_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/0.24.0/snark_verification_scheduler_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/snark_verification_scheduler_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/0.24.1/commitments.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/commitments.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/0.24.1/commitments.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/commitments.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/0.24.1/snark_verification_scheduler_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/snark_verification_scheduler_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/0.24.1/snark_verification_scheduler_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/snark_verification_scheduler_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/18/commitments.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/commitments.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/18/commitments.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/commitments.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/18/snark_verification_scheduler_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/snark_verification_scheduler_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/18/snark_verification_scheduler_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/snark_verification_scheduler_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/19/commitments.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/commitments.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/19/commitments.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/commitments.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/19/snark_verification_scheduler_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/snark_verification_scheduler_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/19/snark_verification_scheduler_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/snark_verification_scheduler_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/20/commitments.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/commitments.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/20/commitments.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/commitments.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/20/snark_verification_scheduler_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/snark_verification_scheduler_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/20/snark_verification_scheduler_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/snark_verification_scheduler_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/21/commitments.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/commitments.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/21/commitments.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/commitments.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/21/snark_verification_scheduler_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/snark_verification_scheduler_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/21/snark_verification_scheduler_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/snark_verification_scheduler_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/22/commitments.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/commitments.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/22/commitments.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/commitments.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/22/snark_verification_scheduler_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/snark_verification_scheduler_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/22/snark_verification_scheduler_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/snark_verification_scheduler_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/23/commitments.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/commitments.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/23/commitments.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/commitments.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/23/snark_verification_scheduler_key.json b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/snark_verification_scheduler_key.json
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/23/snark_verification_scheduler_key.json
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/snark_verification_scheduler_key.json
diff --git a/prover/vk_setup_data_generator_server_fri/historical_data/README.md b/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/README.md
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/historical_data/README.md
rename to prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/README.md
diff --git a/prover/vk_setup_data_generator_server_fri/src/commitment_generator.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/src/commitment_generator.rs
rename to prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs
diff --git a/prover/vk_setup_data_generator_server_fri/src/commitment_utils.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_utils.rs
similarity index 84%
rename from prover/vk_setup_data_generator_server_fri/src/commitment_utils.rs
rename to prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_utils.rs
index 935d06460181..471e76e1a680 100644
--- a/prover/vk_setup_data_generator_server_fri/src/commitment_utils.rs
+++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_utils.rs
@@ -1,9 +1,8 @@
-use std::str::FromStr;
+use std::{str::FromStr, sync::Mutex};

 use anyhow::Context as _;
 use hex::ToHex;
 use once_cell::sync::Lazy;
-use structopt::lazy_static::lazy_static;
 use zkevm_test_harness::witness::recursive_aggregation::{
     compute_leaf_vks_and_params_commitment, compute_node_vk_commitment,
 };
@@ -22,10 +21,7 @@ use crate::{
     VkCommitments,
 };

-lazy_static! {
-    // TODO: do not initialize a static const with data read in runtime.
-    static ref COMMITMENTS: Lazy<L1VerifierConfig> = Lazy::new(|| { circuit_commitments(&Keystore::default()).unwrap() });
-}
+static KEYSTORE: Lazy<Mutex<Option<Keystore>>> = Lazy::new(|| Mutex::new(None));

 fn circuit_commitments(keystore: &Keystore) -> anyhow::Result<L1VerifierConfig> {
     let commitments = generate_commitments(keystore).context("generate_commitments()")?;
@@ -97,14 +93,23 @@ pub fn generate_commitments(keystore: &Keystore) -> anyhow::Result<VkCommitments>
-pub fn get_cached_commitments() -> L1VerifierConfig {
-    tracing::info!("Using cached commitments {:?}", **COMMITMENTS);
-    **COMMITMENTS
+pub fn get_cached_commitments(setup_data_path: Option<String>) -> L1VerifierConfig {
+    if let Some(setup_data_path) = setup_data_path {
+        let keystore = Keystore::new_with_setup_data_path(setup_data_path);
+        let mut keystore_lock = KEYSTORE.lock().unwrap();
+        *keystore_lock = Some(keystore);
+    }
+
+    let keystore = KEYSTORE.lock().unwrap().clone().unwrap_or_default();
+    let commitments = circuit_commitments(&keystore).unwrap();
+
+    tracing::info!("Using cached commitments {:?}", commitments);
+    commitments
 }

 #[test]
 fn test_get_cached_commitments() {
-    let commitments = get_cached_commitments();
+    let commitments = get_cached_commitments(None);
     assert_eq!(
         H256::zero(),
         commitments.params.recursion_circuits_set_vks_hash
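The hunk above swaps the `lazy_static!` commitments cache, which read key data at first access and could never be repointed, for a `Lazy<Mutex<Option<Keystore>>>` slot that callers may seed with an explicit setup-data path before the commitments are computed. A minimal self-contained sketch of that caching pattern, using a simplified stand-in for `Keystore` (the real type lives in this crate and carries more state):

```rust
use std::sync::Mutex;

use once_cell::sync::Lazy;

// Simplified stand-in for the prover's `Keystore`.
#[derive(Clone, Default, Debug)]
struct Keystore {
    setup_data_path: Option<String>,
}

// Same shape as the hunk above: a lazily initialized, overridable slot
// instead of a static that bakes in runtime data on first read.
static KEYSTORE: Lazy<Mutex<Option<Keystore>>> = Lazy::new(|| Mutex::new(None));

fn cached_keystore(setup_data_path: Option<String>) -> Keystore {
    if let Some(path) = setup_data_path {
        // An explicit path overrides whatever was cached before.
        *KEYSTORE.lock().unwrap() = Some(Keystore {
            setup_data_path: Some(path),
        });
    }
    // Fall back to the default keystore when nothing has been cached yet.
    KEYSTORE.lock().unwrap().clone().unwrap_or_default()
}

fn main() {
    println!("{:?}", cached_keystore(None));
    println!("{:?}", cached_keystore(Some("/etc/setup_data".to_string())));
    // Subsequent `None` calls reuse the cached override.
    println!("{:?}", cached_keystore(None));
}
```

Passing `None` keeps whatever was cached (or the default), so repeated calls stay consistent once a path has been set, which is what lets `test_get_cached_commitments` simply pass `None`.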
diff --git a/prover/vk_setup_data_generator_server_fri/src/keystore.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs
similarity index 97%
rename from prover/vk_setup_data_generator_server_fri/src/keystore.rs
rename to prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs
index 25aedeb089ff..e886b5d1b0c0 100644
--- a/prover/vk_setup_data_generator_server_fri/src/keystore.rs
+++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs
@@ -36,6 +36,7 @@ pub enum ProverServiceDataType {
 /// There are 2 types:
 /// - small verification, finalization keys (used only during verification)
 /// - large setup keys, used during proving.
+#[derive(Clone)]
 pub struct Keystore {
     /// Directory to store all the small keys.
     basedir: PathBuf,
@@ -46,7 +47,7 @@ pub struct Keystore {

 fn get_base_path() -> PathBuf {
     let path = core_workspace_dir_or_current_dir();

-    let new_path = path.join("prover/vk_setup_data_generator_server_fri/data");
+    let new_path = path.join("prover/crates/bin/vk_setup_data_generator_server_fri/data");
     if new_path.exists() {
         return new_path;
     }
@@ -55,7 +56,7 @@ fn get_base_path() -> PathBuf {
     components.next_back().unwrap();
     components
         .as_path()
-        .join("prover/vk_setup_data_generator_server_fri/data")
+        .join("prover/crates/bin/vk_setup_data_generator_server_fri/data")
 }

 impl Default for Keystore {
@@ -80,6 +81,7 @@ impl Keystore {
             setup_data_path: Some(setup_data_path),
         }
     }
+
     pub fn new_with_optional_setup_path(basedir: PathBuf, setup_data_path: Option<String>) -> Self {
         Keystore {
             basedir,
@@ -87,6 +89,13 @@ impl Keystore {
         }
     }

+    pub fn new_with_setup_data_path(setup_data_path: String) -> Self {
+        Keystore {
+            basedir: get_base_path(),
+            setup_data_path: Some(setup_data_path),
+        }
+    }
+
     pub fn get_base_path(&self) -> &PathBuf {
         &self.basedir
     }
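`new_with_setup_data_path` keeps the small-keys directory at its default location and overrides only where large setup keys are read from. The default itself comes from `get_base_path`, whose lookup order the following sketch mirrors; `workspace_dir` stands in for `core_workspace_dir_or_current_dir()`:

```rust
use std::path::PathBuf;

// Sketch of the lookup order after the workspace move: prefer the new
// `prover/crates/bin/...` data directory inside the given workspace, and
// if it does not exist, retry the same subpath from the parent directory
// (mirroring the `components.next_back()` trick in the hunk above).
fn vk_data_dir(workspace_dir: PathBuf) -> PathBuf {
    let new_path =
        workspace_dir.join("prover/crates/bin/vk_setup_data_generator_server_fri/data");
    if new_path.exists() {
        return new_path;
    }
    let mut components = workspace_dir.components();
    // Drop the last path component (e.g. a `core` subdirectory).
    components.next_back().unwrap();
    components
        .as_path()
        .join("prover/crates/bin/vk_setup_data_generator_server_fri/data")
}

fn main() {
    println!(
        "{}",
        vk_data_dir(PathBuf::from("/repo/zksync-era/core")).display()
    );
}
```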
diff --git a/prover/vk_setup_data_generator_server_fri/src/lib.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/lib.rs
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/src/lib.rs
rename to prover/crates/bin/vk_setup_data_generator_server_fri/src/lib.rs
diff --git a/prover/vk_setup_data_generator_server_fri/src/main.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/src/main.rs
rename to prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs
diff --git a/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs
rename to prover/crates/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs
diff --git a/prover/vk_setup_data_generator_server_fri/src/tests.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/src/tests.rs
rename to prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs
diff --git a/prover/vk_setup_data_generator_server_fri/src/utils.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/src/utils.rs
rename to prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs
diff --git a/prover/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs
similarity index 100%
rename from prover/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs
rename to prover/crates/bin/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs
diff --git a/prover/witness_generator/Cargo.toml b/prover/crates/bin/witness_generator/Cargo.toml
similarity index 85%
rename from prover/witness_generator/Cargo.toml
rename to prover/crates/bin/witness_generator/Cargo.toml
index 9dc054d23c04..64c6713540fa 100644
--- a/prover/witness_generator/Cargo.toml
+++ b/prover/crates/bin/witness_generator/Cargo.toml
@@ -11,17 +11,14 @@ categories.workspace = true

 [dependencies]
 vise.workspace = true
-prover_dal.workspace = true
-zksync_dal.workspace = true
+zksync_prover_dal.workspace = true
 zksync_config.workspace = true
 zksync_prover_interface.workspace = true
-zksync_prover_config.workspace = true
 zksync_env_config.workspace = true
 zksync_system_constants.workspace = true
-prometheus_exporter.workspace = true
-vlog.workspace = true
+zksync_vlog.workspace = true
 zksync_queued_job_processor.workspace = true
-multivm.workspace = true
+zksync_multivm.workspace = true
 zksync_object_store.workspace = true
 zksync_types.workspace = true
 zksync_state.workspace = true
@@ -30,11 +27,9 @@ vk_setup_data_generator_server_fri.workspace = true
 zksync_prover_fri_types.workspace = true
 zksync_prover_fri_utils.workspace = true
 zksync_core_leftovers.workspace = true
-zksync_protobuf_config.workspace = true

 zkevm_test_harness = { workspace = true }
 circuit_definitions = { workspace = true, features = [ "log_tracing" ] }
-zk_evm.workspace = true
 anyhow.workspace = true
 tracing.workspace = true
diff --git a/prover/witness_generator/README.md b/prover/crates/bin/witness_generator/README.md
similarity index 100%
rename from prover/witness_generator/README.md
rename to prover/crates/bin/witness_generator/README.md
diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/crates/bin/witness_generator/src/basic_circuits.rs
similarity index 64%
rename from prover/witness_generator/src/basic_circuits.rs
rename to prover/crates/bin/witness_generator/src/basic_circuits.rs
index 65d3b976c086..859b8515805a 100644
--- a/prover/witness_generator/src/basic_circuits.rs
+++ b/prover/crates/bin/witness_generator/src/basic_circuits.rs
@@ -12,15 +12,14 @@ use circuit_definitions::{
     encodings::recursion_request::RecursionQueueSimulator,
     zkevm_circuits::fsm_input_output::ClosedFormInputCompactFormWitness,
 };
-use multivm::vm_latest::{
-    constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle,
-};
-use prover_dal::{ConnectionPool, Prover, ProverDal};
 use tracing::Instrument;
 use zkevm_test_harness::geometry_config::get_geometry_config;
 use zksync_config::configs::FriWitnessGeneratorConfig;
-use zksync_dal::{Core, CoreDal};
+use zksync_multivm::vm_latest::{
+    constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle,
+};
 use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::{
@@ -36,16 +35,13 @@ use zksync_prover_fri_types::{
     AuxOutputWitnessWrapper,
 };
 use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer;
-use zksync_prover_interface::inputs::{BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob};
+use zksync_prover_interface::inputs::WitnessInputData;
 use zksync_queued_job_processor::JobProcessor;
-use zksync_state::{PostgresStorage, StorageView};
+use zksync_state::{StorageView, WitnessStorage};
 use zksync_types::{
-    basic_fri_types::{AggregationRound, Eip4844Blobs},
-    block::StorageOracleInfo,
-    protocol_version::ProtocolSemanticVersion,
-    Address, L1BatchNumber, ProtocolVersionId, BOOTLOADER_ADDRESS, H256,
+    basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, Address,
+    L1BatchNumber, BOOTLOADER_ADDRESS,
 };
-use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256};

 use crate::{
     metrics::WITNESS_GENERATOR_METRICS,
@@ -78,8 +74,7 @@ struct BlobUrls {
 #[derive(Clone)]
 pub struct BasicWitnessGeneratorJob {
     block_number: L1BatchNumber,
-    job: PrepareBasicCircuitsJob,
-    eip_4844_blobs: Eip4844Blobs,
+    job: WitnessInputData,
 }

 #[derive(Debug)]
@@ -87,7 +82,6 @@ pub struct BasicWitnessGenerator {
     config: Arc<FriWitnessGeneratorConfig>,
     object_store: Arc<dyn ObjectStore>,
     public_blob_store: Option<Arc<dyn ObjectStore>>,
-    connection_pool: ConnectionPool<Core>,
     prover_connection_pool: ConnectionPool<Prover>,
     protocol_version: ProtocolSemanticVersion,
 }
@@ -97,7 +91,6 @@ impl BasicWitnessGenerator {
         config: FriWitnessGeneratorConfig,
         object_store: Arc<dyn ObjectStore>,
         public_blob_store: Option<Arc<dyn ObjectStore>>,
-        connection_pool: ConnectionPool<Core>,
         prover_connection_pool: ConnectionPool<Prover>,
         protocol_version: ProtocolSemanticVersion,
     ) -> Self {
@@ -105,7 +98,6 @@ impl BasicWitnessGenerator {
             config: Arc::new(config),
             object_store,
             public_blob_store,
-            connection_pool,
             prover_connection_pool,
             protocol_version,
         }
@@ -113,15 +105,10 @@ impl BasicWitnessGenerator {

     async fn process_job_impl(
         object_store: Arc<dyn ObjectStore>,
-        connection_pool: ConnectionPool<Core>,
         basic_job: BasicWitnessGeneratorJob,
         started_at: Instant,
     ) -> Option<BasicCircuitArtifacts> {
-        let BasicWitnessGeneratorJob {
-            block_number,
-            job,
-            eip_4844_blobs,
-        } = basic_job;
+        let BasicWitnessGeneratorJob { block_number, job } = basic_job;

         tracing::info!(
             "Starting witness generation of type {:?} for block {}",
@@ -129,17 +116,7 @@ impl BasicWitnessGenerator {
             block_number.0
         );

-        Some(
-            process_basic_circuits_job(
-                &*object_store,
-                connection_pool,
-                started_at,
-                block_number,
-                job,
-                eip_4844_blobs,
-            )
-            .await,
-        )
+        Some(process_basic_circuits_job(&*object_store, started_at, block_number, job).await)
     }
 }

@@ -165,13 +142,13 @@ impl JobProcessor for BasicWitnessGenerator {
         )
         .await
         {
-            Some((block_number, eip_4844_blobs)) => {
+            Some(block_number) => {
                 tracing::info!(
                     "Processing FRI basic witness-gen for block {}",
                     block_number
                 );
                 let started_at = Instant::now();
-                let job = get_artifacts(block_number, &*self.object_store, eip_4844_blobs).await;
+                let job = get_artifacts(block_number, &*self.object_store).await;

                 WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()]
                     .observe(started_at.elapsed());
@@ -200,14 +177,11 @@ impl JobProcessor for BasicWitnessGenerator {
         started_at: Instant,
     ) -> tokio::task::JoinHandle<anyhow::Result<Option<BasicCircuitArtifacts>>> {
         let object_store = Arc::clone(&self.object_store);
-        let connection_pool = self.connection_pool.clone();
         tokio::spawn(async move {
             let block_number = job.block_number;
-            Ok(
-                Self::process_job_impl(object_store, connection_pool, job, started_at)
-                    .instrument(tracing::info_span!("basic_circuit", %block_number))
-                    .await,
-            )
+            Ok(Self::process_job_impl(object_store, job, started_at)
+                .instrument(tracing::info_span!("basic_circuit", %block_number))
+                .await)
         })
     }

@@ -272,22 +246,12 @@ impl JobProcessor for BasicWitnessGenerator {
 #[allow(clippy::too_many_arguments)]
 async fn process_basic_circuits_job(
     object_store: &dyn ObjectStore,
-    connection_pool: ConnectionPool<Core>,
     started_at: Instant,
     block_number: L1BatchNumber,
-    job: PrepareBasicCircuitsJob,
-    eip_4844_blobs: Eip4844Blobs,
+    job: WitnessInputData,
 ) -> BasicCircuitArtifacts {
-    let witness_gen_input =
-        build_basic_circuits_witness_generator_input(&connection_pool, job, block_number).await;
-    let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = generate_witness(
-        block_number,
-        object_store,
-        connection_pool,
-        witness_gen_input,
-        eip_4844_blobs,
-    )
-    .await;
+    let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) =
+        generate_witness(block_number, object_store, job).await;
     WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()]
         .observe(started_at.elapsed());
     tracing::info!(
@@ -344,14 +308,9 @@ async fn update_database(
 async fn get_artifacts(
     block_number: L1BatchNumber,
     object_store: &dyn ObjectStore,
-    eip_4844_blobs: Eip4844Blobs,
 ) -> BasicWitnessGeneratorJob {
     let job = object_store.get(block_number).await.unwrap();
-    BasicWitnessGeneratorJob {
-        block_number,
-        job,
-        eip_4844_blobs,
-    }
+    BasicWitnessGeneratorJob { block_number, job }
 }

 async fn save_scheduler_artifacts(
@@ -403,55 +362,10 @@ async fn save_recursion_queue(
     (circuit_id, blob_url, basic_circuit_count)
 }

-// If making changes to this method, consider moving this logic to the DAL layer and make
-// `PrepareBasicCircuitsJob` have all fields of `BasicCircuitWitnessGeneratorInput`.
-async fn build_basic_circuits_witness_generator_input(
-    connection_pool: &ConnectionPool<Core>,
-    witness_merkle_input: PrepareBasicCircuitsJob,
-    l1_batch_number: L1BatchNumber,
-) -> BasicCircuitWitnessGeneratorInput {
-    let mut connection = connection_pool.connection().await.unwrap();
-    let block_header = connection
-        .blocks_dal()
-        .get_l1_batch_header(l1_batch_number)
-        .await
-        .unwrap()
-        .unwrap();
-    let initial_heap_content = connection
-        .blocks_dal()
-        .get_initial_bootloader_heap(l1_batch_number)
-        .await
-        .unwrap()
-        .unwrap();
-    let (_, previous_block_timestamp) = connection
-        .blocks_dal()
-        .get_l1_batch_state_root_and_timestamp(l1_batch_number - 1)
-        .await
-        .unwrap()
-        .unwrap();
-    let previous_block_hash = connection
-        .blocks_dal()
-        .get_l1_batch_state_root(l1_batch_number - 1)
-        .await
-        .unwrap()
-        .expect("cannot generate witness before the root hash is computed");
-    BasicCircuitWitnessGeneratorInput {
-        block_number: l1_batch_number,
-        previous_block_timestamp,
-        previous_block_hash,
-        block_timestamp: block_header.timestamp,
-        used_bytecodes_hashes: block_header.used_contract_hashes,
-        initial_heap_content,
-        merkle_paths_input: witness_merkle_input,
-    }
-}
-
 async fn generate_witness(
     block_number: L1BatchNumber,
     object_store: &dyn ObjectStore,
-    connection_pool: ConnectionPool<Core>,
-    input: BasicCircuitWitnessGeneratorInput,
-    eip_4844_blobs: Eip4844Blobs,
+    input: WitnessInputData,
 ) -> (
     Vec<(u8, String)>,
     Vec<(u8, String, usize)>,
@@ -462,119 +376,37 @@ async fn generate_witness(
     >,
     BlockAuxilaryOutputWitness,
 ) {
-    let mut connection = connection_pool.connection().await.unwrap();
-    let header = connection
-        .blocks_dal()
-        .get_l1_batch_header(input.block_number)
-        .await
-        .unwrap()
-        .unwrap();
-
-    let protocol_version = header
-        .protocol_version
-        .unwrap_or(ProtocolVersionId::last_potentially_undefined());
-
-    let previous_batch_with_metadata = connection
-        .blocks_dal()
-        .get_l1_batch_metadata(zksync_types::L1BatchNumber(
-            input.block_number.checked_sub(1).unwrap(),
-        ))
-        .await
-        .unwrap()
-        .unwrap();
-
-    let bootloader_code_bytes = connection
-        .factory_deps_dal()
-        .get_sealed_factory_dep(header.base_system_contracts_hashes.bootloader)
-        .await
-        .expect("Failed fetching bootloader bytecode from DB")
-        .expect("Bootloader bytecode should exist");
-    let bootloader_code = bytes_to_chunks(&bootloader_code_bytes);
-    let account_bytecode_bytes = connection
-        .factory_deps_dal()
-        .get_sealed_factory_dep(header.base_system_contracts_hashes.default_aa)
-        .await
-        .expect("Failed fetching default account bytecode from DB")
-        .expect("Default account bytecode should exist");
-    let account_bytecode = bytes_to_chunks(&account_bytecode_bytes);
-    let bootloader_contents =
-        expand_bootloader_contents(&input.initial_heap_content, protocol_version);
-    let account_code_hash = h256_to_u256(header.base_system_contracts_hashes.default_aa);
-
-    let hashes: HashSet<H256> = input
-        .used_bytecodes_hashes
-        .iter()
-        // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness`
-        .filter(|&&hash| hash != h256_to_u256(header.base_system_contracts_hashes.bootloader))
-        .map(|hash| u256_to_h256(*hash))
-        .collect();
-
-    let StorageOracleInfo {
-        storage_refunds,
-        pubdata_costs,
-    } = connection
-        .blocks_dal()
-        .get_storage_oracle_info(input.block_number)
-        .await
-        .unwrap()
-        .unwrap();
-
-    let mut used_bytecodes = connection
-        .factory_deps_dal()
-        .get_factory_deps(&hashes)
-        .await;
-    if input.used_bytecodes_hashes.contains(&account_code_hash) {
-        used_bytecodes.insert(account_code_hash, account_bytecode);
-    }
-
-    assert_eq!(
-        hashes.len(),
-        used_bytecodes.len(),
-        "{} factory deps are not found in DB",
-        hashes.len() - used_bytecodes.len()
+    let bootloader_contents = expand_bootloader_contents(
+        &input.vm_run_data.initial_heap_content,
+        input.vm_run_data.protocol_version,
     );

-    // `DbStorageProvider` was designed to be used in API, so it accepts miniblock numbers.
-    // Probably, we should make it work with L1 batch numbers too.
-    let (_, last_miniblock_number) = connection
-        .blocks_dal()
-        .get_l2_block_range_of_l1_batch(input.block_number - 1)
-        .await
-        .unwrap()
-        .expect("L1 batch should contain at least one miniblock");
-    drop(connection);
-
-    let mut tree = PrecalculatedMerklePathsProvider::new(
-        input.merkle_paths_input,
-        input.previous_block_hash.0,
+    let tree = PrecalculatedMerklePathsProvider::new(
        input.merkle_paths,
        input.previous_batch_metadata.root_hash.0,
     );
     let geometry_config = get_geometry_config();
     let mut hasher = DefaultHasher::new();
     geometry_config.hash(&mut hasher);
     tracing::info!(
         "generating witness for block {} using geometry config hash: {}",
-        input.block_number.0,
+        input.vm_run_data.l1_batch_number.0,
         hasher.finish()
     );

-    // The following part is CPU-heavy, so we move it to a separate thread.
-    let rt_handle = tokio::runtime::Handle::current();
-
     let (circuit_sender, mut circuit_receiver) = tokio::sync::mpsc::channel(1);
     let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1);

     let make_circuits = tokio::task::spawn_blocking(move || {
-        let connection = rt_handle.block_on(connection_pool.connection()).unwrap();
-
-        let storage = PostgresStorage::new(rt_handle, connection, last_miniblock_number, true);
-        let storage_view = StorageView::new(storage).to_rc_ptr();
+        let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state);
+        let storage_view = StorageView::new(witness_storage).to_rc_ptr();

-        let vm_storage_oracle: VmStorageOracle<StorageView<PostgresStorage<'_>>, HistoryDisabled> =
+        let vm_storage_oracle: VmStorageOracle<StorageView<WitnessStorage>, HistoryDisabled> =
             VmStorageOracle::new(storage_view.clone());
         let storage_oracle = StorageOracle::new(
             vm_storage_oracle,
-            storage_refunds,
-            pubdata_costs.expect("pubdata costs should be present"),
+            input.vm_run_data.storage_refunds,
+            input.vm_run_data.pubdata_costs,
         );

         let path = KZG_TRUSTED_SETUP_FILE
@@ -585,20 +417,20 @@ async fn generate_witness(
         let (scheduler_witness, block_witness) = zkevm_test_harness::external_calls::run(
             Address::zero(),
             BOOTLOADER_ADDRESS,
-            bootloader_code,
+            input.vm_run_data.bootloader_code,
             bootloader_contents,
             false,
-            account_code_hash,
+            input.vm_run_data.default_account_code_hash,
             // NOTE: this will be evm_simulator_code_hash in future releases
-            account_code_hash,
-            used_bytecodes,
+            input.vm_run_data.default_account_code_hash,
+            input.vm_run_data.used_bytecodes,
             Vec::default(),
             MAX_CYCLES_FOR_TX as usize,
             geometry_config,
             storage_oracle,
-            &mut tree,
+            tree,
             path,
-            eip_4844_blobs.blobs(),
+            input.eip_4844_blobs.blobs(),
             |circuit| {
                 circuit_sender.blocking_send(circuit).unwrap();
             },
@@ -635,10 +467,8 @@ async fn generate_witness(

     recursion_urls.retain(|(circuit_id, _, _)| circuits_present.contains(circuit_id));

-    scheduler_witness.previous_block_meta_hash =
-        previous_batch_with_metadata.metadata.meta_parameters_hash.0;
-    scheduler_witness.previous_block_aux_hash =
-        previous_batch_with_metadata.metadata.aux_data_hash.0;
+    scheduler_witness.previous_block_meta_hash = input.previous_batch_metadata.meta_hash.0;
+    scheduler_witness.previous_block_aux_hash = input.previous_batch_metadata.aux_hash.0;

     (
         circuit_urls,
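The net effect of the `basic_circuits.rs` changes above: witness generation no longer queries Postgres for headers, bytecodes, refunds, or Merkle paths, and the CPU-heavy harness call runs on the blocking pool while streaming circuits back through bounded channels. A self-contained sketch of that producer/consumer shape, with plain `u64`s standing in for circuits:

```rust
use tokio::sync::mpsc;

// Minimal sketch of the pattern retained in `generate_witness` above: a
// CPU-bound closure runs on tokio's blocking pool and streams results
// through a bounded channel, so the async side can persist each item
// while the next one is still being produced.
#[tokio::main]
async fn main() {
    let (sender, mut receiver) = mpsc::channel::<u64>(1);

    let produce = tokio::task::spawn_blocking(move || {
        for circuit_id in 0..5u64 {
            // `blocking_send` backpressures the producer when the
            // single-slot buffer is full.
            sender.blocking_send(circuit_id).unwrap();
        }
        // Dropping the sender closes the channel and ends the loop below.
    });

    while let Some(circuit_id) = receiver.recv().await {
        println!("uploading circuit {circuit_id}");
    }
    produce.await.unwrap();
}
```

A buffer size of 1 keeps at most one finished circuit in memory, which matters here because individual circuits are large.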
diff --git a/prover/witness_generator/src/leaf_aggregation.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation.rs
similarity index 99%
rename from prover/witness_generator/src/leaf_aggregation.rs
rename to prover/crates/bin/witness_generator/src/leaf_aggregation.rs
index 2695ec198888..76703d0d874d 100644
--- a/prover/witness_generator/src/leaf_aggregation.rs
+++ b/prover/crates/bin/witness_generator/src/leaf_aggregation.rs
@@ -3,14 +3,13 @@ use std::{sync::Arc, time::Instant};
 use anyhow::Context as _;
 use async_trait::async_trait;
 use circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type;
-use prover_dal::{Prover, ProverDal};
 use zkevm_test_harness::{
     witness::recursive_aggregation::{compute_leaf_params, create_leaf_witnesses},
     zkevm_circuits::scheduler::aux::BaseLayerCircuitType,
 };
 use zksync_config::configs::FriWitnessGeneratorConfig;
-use zksync_dal::ConnectionPool;
 use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::field::goldilocks::GoldilocksField,
diff --git a/prover/witness_generator/src/lib.rs b/prover/crates/bin/witness_generator/src/lib.rs
similarity index 79%
rename from prover/witness_generator/src/lib.rs
rename to prover/crates/bin/witness_generator/src/lib.rs
index 0e6f7ddf6805..a80f06312d12 100644
--- a/prover/witness_generator/src/lib.rs
+++ b/prover/crates/bin/witness_generator/src/lib.rs
@@ -1,3 +1,4 @@
+#![allow(incomplete_features)] // We have to use generic const exprs.
 #![feature(generic_const_exprs)]
 pub mod basic_circuits;
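The crate roots gain `#![allow(incomplete_features)]` because `generic_const_exprs` is an explicitly incomplete nightly feature that otherwise warns on every build. A tiny nightly-only sketch of the kind of const-expression-in-type-position these crates rely on (the bound syntax is the standard idiom for this feature):

```rust
#![allow(incomplete_features)] // `generic_const_exprs` is marked incomplete.
#![feature(generic_const_exprs)]

// A const expression (`N / 2`) used in a type position, which stable
// const generics cannot express yet.
fn first_half<const N: usize>(bytes: [u8; N]) -> [u8; N / 2]
where
    [(); N / 2]:,
{
    let mut out = [0u8; N / 2];
    out.copy_from_slice(&bytes[..N / 2]);
    out
}

fn main() {
    assert_eq!(first_half([1u8, 2, 3, 4]), [1, 2]);
}
```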
diff --git a/prover/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs
similarity index 83%
rename from prover/witness_generator/src/main.rs
rename to prover/crates/bin/witness_generator/src/main.rs
index 6a4cc4fc33eb..caad9458827e 100644
--- a/prover/witness_generator/src/main.rs
+++ b/prover/crates/bin/witness_generator/src/main.rs
@@ -1,21 +1,21 @@
+#![allow(incomplete_features)] // We have to use generic const exprs.
 #![feature(generic_const_exprs)]

 use std::time::{Duration, Instant};

 use anyhow::{anyhow, Context as _};
 use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt};
-use prometheus_exporter::PrometheusExporterConfig;
-use prover_dal::{ConnectionPool, Prover, ProverDal};
 use structopt::StructOpt;
 use tokio::sync::watch;
-use zksync_config::ObjectStoreConfig;
-use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv};
+use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config};
+use zksync_env_config::object_store::ProverObjectStoreConfig;
 use zksync_object_store::ObjectStoreFactory;
-use zksync_prover_config::{load_database_secrets, load_general_config};
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
 use zksync_queued_job_processor::JobProcessor;
 use zksync_types::basic_fri_types::AggregationRound;
 use zksync_utils::wait_for_tasks::ManagedTasks;
 use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments;
+use zksync_vlog::prometheus::PrometheusExporterConfig;

 use crate::{
     basic_circuits::BasicWitnessGenerator, leaf_aggregation::LeafAggregationWitnessGenerator,
@@ -35,7 +35,6 @@ mod utils;

 #[cfg(not(target_env = "msvc"))]
 use jemallocator::Jemalloc;
-use zksync_dal::Core;
 use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION;

 #[cfg(not(target_env = "msvc"))]
@@ -78,12 +77,12 @@ async fn main() -> anyhow::Result<()> {
     let observability_config = general_config
         .observability
         .context("observability config")?;
-    let log_format: vlog::LogFormat = observability_config
+    let log_format: zksync_vlog::LogFormat = observability_config
         .log_format
         .parse()
         .context("Invalid log format")?;

-    let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format);
+    let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(log_format);
     if let Some(sentry_url) = &observability_config.sentry_url {
         builder = builder
             .with_sentry_url(sentry_url)
@@ -111,28 +110,30 @@ async fn main() -> anyhow::Result<()> {
     let started_at = Instant::now();
     let use_push_gateway = opt.batch_size.is_some();

+    let prover_config = general_config.prover_config.context("prover config")?;
     let object_store_config = ProverObjectStoreConfig(
-        general_config
-            .prover_config
-            .context("prover config")?
+        prover_config
             .prover_object_store
-            .context("object store")?,
+            .context("object store")?
+            .clone(),
     );
     let store_factory = ObjectStoreFactory::new(object_store_config.0);
     let config = general_config
         .witness_generator
         .context("witness generator config")?;
-    let prometheus_config = general_config
-        .prometheus_config
-        .context("prometheus config")?;
-    let postgres_config = general_config.postgres_config.context("postgres config")?;
-    let connection_pool = ConnectionPool::<Core>::builder(
-        database_secrets.master_url()?,
-        postgres_config.max_connections()?,
-    )
-    .build()
-    .await
-    .context("failed to build a connection_pool")?;
+
+    let prometheus_config = general_config.prometheus_config;
+
+    // If the prometheus listener port is not set in the witness generator config, use the one from the prometheus config.
+    let prometheus_listener_port = if let Some(port) = config.prometheus_listener_port {
+        port
+    } else {
+        prometheus_config
+            .clone()
+            .context("prometheus config")?
+            .listener_port
+    };
+
     let prover_connection_pool = ConnectionPool::<Prover>::singleton(database_secrets.prover_url()?)
         .build()
         .await
@@ -190,19 +191,25 @@ async fn main() -> anyhow::Result<()> {
         );

         let prometheus_config = if use_push_gateway {
+            let prometheus_config = prometheus_config
+                .clone()
+                .context("prometheus config needed when use_push_gateway enabled")?;
             PrometheusExporterConfig::push(
-                prometheus_config.gateway_endpoint(),
+                prometheus_config
+                    .gateway_endpoint()
+                    .context("gateway_endpoint needed when use_push_gateway enabled")?,
                 prometheus_config.push_interval(),
             )
         } else {
             // `u16` cast is safe since i is in range [0, 4)
-            PrometheusExporterConfig::pull(prometheus_config.listener_port + i as u16)
+            PrometheusExporterConfig::pull(prometheus_listener_port + i as u16)
         };
         let prometheus_task = prometheus_config.run(stop_receiver.clone());

         let witness_generator_task = match round {
             AggregationRound::BasicCircuits => {
-                let vk_commitments = get_cached_commitments();
+                let setup_data_path = prover_config.setup_data_path.clone();
+                let vk_commitments = get_cached_commitments(Some(setup_data_path));
                 assert_eq!(
                     vk_commitments,
                     vk_commitments_in_db,
@@ -213,8 +220,10 @@ async fn main() -> anyhow::Result<()> {
                     false => None,
                     true => Some(
                         ObjectStoreFactory::new(
-                            ObjectStoreConfig::from_env()
-                                .context("ObjectStoreConfig::from_env()")?,
+                            prover_config
+                                .public_object_store
+                                .clone()
+                                .expect("public_object_store"),
                         )
                         .create_store()
                         .await?,
@@ -224,7 +233,6 @@ async fn main() -> anyhow::Result<()> {
                     config.clone(),
                     store_factory.create_store().await?,
                     public_blob_store,
-                    connection_pool.clone(),
                     prover_connection_pool.clone(),
                     protocol_version,
                 );
diff --git a/prover/witness_generator/src/metrics.rs b/prover/crates/bin/witness_generator/src/metrics.rs
similarity index 100%
rename from prover/witness_generator/src/metrics.rs
rename to prover/crates/bin/witness_generator/src/metrics.rs
diff --git a/prover/witness_generator/src/node_aggregation.rs b/prover/crates/bin/witness_generator/src/node_aggregation.rs
similarity index 99%
rename from prover/witness_generator/src/node_aggregation.rs
rename to prover/crates/bin/witness_generator/src/node_aggregation.rs
index 209ae5ef7749..36b13d4357a9 100644
--- a/prover/witness_generator/src/node_aggregation.rs
+++ b/prover/crates/bin/witness_generator/src/node_aggregation.rs
@@ -2,13 +2,12 @@ use std::{sync::Arc, time::Instant};

 use anyhow::Context as _;
 use async_trait::async_trait;
-use prover_dal::{Prover, ProverDal};
 use zkevm_test_harness::witness::recursive_aggregation::{
     compute_node_vk_commitment, create_node_witnesses,
 };
 use zksync_config::configs::FriWitnessGeneratorConfig;
-use zksync_dal::ConnectionPool;
 use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::field::goldilocks::GoldilocksField,
diff --git a/prover/witness_generator/src/precalculated_merkle_paths_provider.rs b/prover/crates/bin/witness_generator/src/precalculated_merkle_paths_provider.rs
similarity index 96%
rename from prover/witness_generator/src/precalculated_merkle_paths_provider.rs
rename to prover/crates/bin/witness_generator/src/precalculated_merkle_paths_provider.rs
index 2cfadc93fc6a..15f4fd68408b 100644
--- a/prover/witness_generator/src/precalculated_merkle_paths_provider.rs
+++ b/prover/crates/bin/witness_generator/src/precalculated_merkle_paths_provider.rs
@@ -1,9 +1,11 @@
 use serde::{Deserialize, Serialize};
-use zk_evm::blake2::Blake2s256;
-use zkevm_test_harness::witness::tree::{
-    BinaryHasher, BinarySparseStorageTree, EnumeratedBinaryLeaf, LeafQuery, ZkSyncStorageLeaf,
+use zkevm_test_harness::{
+    witness::tree::{
+        BinaryHasher, BinarySparseStorageTree, EnumeratedBinaryLeaf, LeafQuery, ZkSyncStorageLeaf,
+    },
+    zk_evm::blake2::Blake2s256,
 };
-use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata};
+use zksync_prover_interface::inputs::{StorageLogMetadata, WitnessInputMerklePaths};

 #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
 pub struct PrecalculatedMerklePathsProvider {
@@ -19,7 +21,7 @@ pub struct PrecalculatedMerklePathsProvider {
 }

 impl PrecalculatedMerklePathsProvider {
-    pub fn new(input: PrepareBasicCircuitsJob, root_hash: [u8; 32]) -> Self {
+    pub fn new(input: WitnessInputMerklePaths, root_hash: [u8; 32]) -> Self {
         let next_enumeration_index = input.next_enumeration_index();
         tracing::debug!("Initializing PrecalculatedMerklePathsProvider. Initial root_hash: {:?}, initial next_enumeration_index: {:?}", root_hash, next_enumeration_index);
         Self {
diff --git a/prover/witness_generator/src/recursion_tip.rs b/prover/crates/bin/witness_generator/src/recursion_tip.rs
similarity index 99%
rename from prover/witness_generator/src/recursion_tip.rs
rename to prover/crates/bin/witness_generator/src/recursion_tip.rs
index a44661d55aaf..2f55621fecaf 100644
--- a/prover/witness_generator/src/recursion_tip.rs
+++ b/prover/crates/bin/witness_generator/src/recursion_tip.rs
@@ -9,7 +9,6 @@ use circuit_definitions::{
     },
     recursion_layer_proof_config,
 };
-use prover_dal::{Prover, ProverDal};
 use zkevm_test_harness::{
     boojum::{
         field::{
@@ -37,8 +36,8 @@ use zkevm_test_harness::{
     },
 };
 use zksync_config::configs::FriWitnessGeneratorConfig;
-use zksync_dal::ConnectionPool;
 use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
 use zksync_prover_fri_types::{
     get_current_pod_name,
     keys::{ClosedFormInputKey, FriCircuitKey},
diff --git a/prover/witness_generator/src/scheduler.rs b/prover/crates/bin/witness_generator/src/scheduler.rs
similarity index 99%
rename from prover/witness_generator/src/scheduler.rs
rename to prover/crates/bin/witness_generator/src/scheduler.rs
index 8585c0c2f2b4..80c4322e644e 100644
--- a/prover/witness_generator/src/scheduler.rs
+++ b/prover/crates/bin/witness_generator/src/scheduler.rs
@@ -2,13 +2,12 @@ use std::{convert::TryInto, sync::Arc, time::Instant};

 use anyhow::Context as _;
 use async_trait::async_trait;
-use prover_dal::{Prover, ProverDal};
 use zkevm_test_harness::zkevm_circuits::recursion::{
     leaf_layer::input::RecursionLeafParametersWitness, NUM_BASE_LAYER_CIRCUITS,
 };
 use zksync_config::configs::FriWitnessGeneratorConfig;
-use zksync_dal::ConnectionPool;
 use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::{
diff --git a/prover/witness_generator/src/storage_oracle.rs b/prover/crates/bin/witness_generator/src/storage_oracle.rs
similarity index 100%
rename from prover/witness_generator/src/storage_oracle.rs
rename to prover/crates/bin/witness_generator/src/storage_oracle.rs
diff --git a/prover/witness_generator/src/tests.rs b/prover/crates/bin/witness_generator/src/tests.rs
similarity index 98%
rename from prover/witness_generator/src/tests.rs
rename to prover/crates/bin/witness_generator/src/tests.rs
index 5163368d66d2..d6b00d2ccb4b 100644
--- a/prover/witness_generator/src/tests.rs
+++ b/prover/crates/bin/witness_generator/src/tests.rs
@@ -5,7 +5,7 @@ use zkevm_test_harness::{
     kzg::KzgSettings,
     witness::tree::{BinarySparseStorageTree, ZkSyncStorageLeaf},
 };
-use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata};
+use zksync_prover_interface::inputs::{StorageLogMetadata, WitnessInputMerklePaths};
 use zksync_types::U256;

 use super::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider;
@@ -81,7 +81,7 @@ const fn generate_storage_log_metadata(
 }

 fn create_provider() -> PrecalculatedMerklePathsProvider {
-    let mut job = PrepareBasicCircuitsJob::new(4);
+    let mut job = WitnessInputMerklePaths::new(4);
     for (mut log, merkle_path) in LOGS_AND_PATHS {
         log.merkle_paths = vec![merkle_path];
         job.push_merkle_path(log);
diff --git a/prover/witness_generator/src/trusted_setup.json b/prover/crates/bin/witness_generator/src/trusted_setup.json
similarity index 100%
rename from prover/witness_generator/src/trusted_setup.json
rename to prover/crates/bin/witness_generator/src/trusted_setup.json
diff --git a/prover/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs
similarity index 99%
rename from prover/witness_generator/src/utils.rs
rename to prover/crates/bin/witness_generator/src/utils.rs
index ae8a33519806..a1046f258fc1 100644
--- a/prover/witness_generator/src/utils.rs
+++ b/prover/crates/bin/witness_generator/src/utils.rs
@@ -7,12 +7,12 @@ use circuit_definitions::circuit_definitions::{
     base_layer::ZkSyncBaseLayerCircuit,
     recursion_layer::{ZkSyncRecursionLayerStorageType, ZkSyncRecursionProof},
 };
-use multivm::utils::get_used_bootloader_memory_bytes;
 use once_cell::sync::Lazy;
 use zkevm_test_harness::{
     boojum::field::goldilocks::GoldilocksField, empty_node_proof,
     zkevm_circuits::scheduler::aux::BaseLayerCircuitType,
 };
+use zksync_multivm::utils::get_used_bootloader_memory_bytes;
 use zksync_object_store::{serialize_using_bincode, Bucket, ObjectStore, StoredObject};
 use zksync_prover_fri_types::{
     circuit_definitions::{
diff --git a/prover/witness_generator/tests/basic_test.rs b/prover/crates/bin/witness_generator/tests/basic_test.rs
similarity index 100%
rename from prover/witness_generator/tests/basic_test.rs
rename to prover/crates/bin/witness_generator/tests/basic_test.rs
diff --git a/prover/witness_generator/tests/data/leaf/leaf_aggregation_witness_jobs_fri/closed_form_inputs_125010_4.bin b/prover/crates/bin/witness_generator/tests/data/leaf/leaf_aggregation_witness_jobs_fri/closed_form_inputs_125010_4.bin
similarity index 100%
rename from prover/witness_generator/tests/data/leaf/leaf_aggregation_witness_jobs_fri/closed_form_inputs_125010_4.bin
rename to prover/crates/bin/witness_generator/tests/data/leaf/leaf_aggregation_witness_jobs_fri/closed_form_inputs_125010_4.bin
diff --git a/prover/witness_generator/tests/data/leaf/node_aggregation_witness_jobs_fri/aggregations_125010_6_0.bin b/prover/crates/bin/witness_generator/tests/data/leaf/node_aggregation_witness_jobs_fri/aggregations_125010_6_0.bin
similarity index 100%
rename from prover/witness_generator/tests/data/leaf/node_aggregation_witness_jobs_fri/aggregations_125010_6_0.bin
rename to prover/crates/bin/witness_generator/tests/data/leaf/node_aggregation_witness_jobs_fri/aggregations_125010_6_0.bin
diff --git a/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639043.bin b/prover/crates/bin/witness_generator/tests/data/leaf/proofs_fri/proof_4639043.bin
similarity index 100%
rename from prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639043.bin
rename to prover/crates/bin/witness_generator/tests/data/leaf/proofs_fri/proof_4639043.bin
diff --git a/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639044.bin b/prover/crates/bin/witness_generator/tests/data/leaf/proofs_fri/proof_4639044.bin
similarity index 100%
rename from prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639044.bin
rename to prover/crates/bin/witness_generator/tests/data/leaf/proofs_fri/proof_4639044.bin
diff --git a/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639045.bin b/prover/crates/bin/witness_generator/tests/data/leaf/proofs_fri/proof_4639045.bin
similarity index 100%
rename from prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639045.bin
rename to prover/crates/bin/witness_generator/tests/data/leaf/proofs_fri/proof_4639045.bin
diff --git a/prover/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_0.bin b/prover/crates/bin/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_0.bin
similarity index 100%
rename from prover/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_0.bin
rename to prover/crates/bin/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_0.bin
diff --git a/prover/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_1.bin b/prover/crates/bin/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_1.bin
similarity index 100%
rename from prover/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_1.bin
rename to prover/crates/bin/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_1.bin
diff --git a/prover/witness_generator/tests/data/node/proofs_fri/proof_5211320.bin b/prover/crates/bin/witness_generator/tests/data/node/proofs_fri/proof_5211320.bin
similarity index 100%
rename from prover/witness_generator/tests/data/node/proofs_fri/proof_5211320.bin
rename to prover/crates/bin/witness_generator/tests/data/node/proofs_fri/proof_5211320.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627082.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627082.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627082.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627082.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627083.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627083.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627083.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627083.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627084.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627084.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627084.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627084.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627085.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627085.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627085.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627085.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627086.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627086.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627086.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627086.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627090.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627090.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627090.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627090.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627091.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627091.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627091.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627091.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627092.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627092.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627092.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627092.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627093.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627093.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627093.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627093.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627094.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627094.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627094.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5627094.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5629097.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5629097.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5629097.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5629097.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5631320.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5631320.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5631320.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5631320.bin
diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5639969.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5639969.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5639969.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/proofs_fri/proof_5639969.bin
diff --git a/prover/witness_generator/tests/data/scheduler/prover_jobs_fri/128599_0_1_Scheduler_0.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/prover_jobs_fri/128599_0_1_Scheduler_0.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/prover_jobs_fri/128599_0_1_Scheduler_0.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/prover_jobs_fri/128599_0_1_Scheduler_0.bin
diff --git a/prover/witness_generator/tests/data/scheduler/scheduler_witness_jobs_fri/scheduler_witness_128599.bin b/prover/crates/bin/witness_generator/tests/data/scheduler/scheduler_witness_jobs_fri/scheduler_witness_128599.bin
similarity index 100%
rename from prover/witness_generator/tests/data/scheduler/scheduler_witness_jobs_fri/scheduler_witness_128599.bin
rename to prover/crates/bin/witness_generator/tests/data/scheduler/scheduler_witness_jobs_fri/scheduler_witness_128599.bin
diff --git a/prover/witness_vector_generator/Cargo.toml b/prover/crates/bin/witness_vector_generator/Cargo.toml
similarity index 77%
rename from prover/witness_vector_generator/Cargo.toml
rename to prover/crates/bin/witness_vector_generator/Cargo.toml
index 0bd23270cf6e..e8edecdf87b3 100644
--- a/prover/witness_vector_generator/Cargo.toml
+++ b/prover/crates/bin/witness_vector_generator/Cargo.toml
@@ -12,25 +12,22 @@ categories.workspace = true
 [dependencies]
 vise.workspace = true
 zksync_types.workspace = true
-prover_dal.workspace = true
+zksync_prover_dal.workspace = true
 zksync_config.workspace = true
 zksync_env_config.workspace = true
 zksync_object_store.workspace = true
 zksync_prover_fri_utils.workspace = true
 zksync_utils.workspace = true
-prometheus_exporter.workspace = true
 zksync_prover_fri_types.workspace = true
+zksync_core_leftovers.workspace = true
 zksync_queued_job_processor.workspace = true
+zksync_vlog.workspace = true
 vk_setup_data_generator_server_fri.workspace = true
-vlog.workspace = true

 anyhow.workspace = true
 tracing.workspace = true
-structopt.workspace = true
 tokio = { workspace = true, features = ["time", "macros"] }
-futures = { workspace = true, features = ["compat"] }
 ctrlc = { workspace = true, features = ["termination"] }
-serde = { workspace = true, features = ["derive"] }
+clap = { workspace = true, features = ["derive"] }
 async-trait.workspace = true
-queues.workspace = true
 bincode.workspace = true
diff --git a/prover/witness_vector_generator/README.md b/prover/crates/bin/witness_vector_generator/README.md
similarity index 100%
rename from prover/witness_vector_generator/README.md
rename to prover/crates/bin/witness_vector_generator/README.md
diff --git a/prover/witness_vector_generator/src/generator.rs b/prover/crates/bin/witness_vector_generator/src/generator.rs
similarity index 92%
rename from prover/witness_vector_generator/src/generator.rs
rename to prover/crates/bin/witness_vector_generator/src/generator.rs
index bc03593e0bfb..5574f0f1578d 100644
--- a/prover/witness_vector_generator/src/generator.rs
+++ b/prover/crates/bin/witness_vector_generator/src/generator.rs
@@ -6,16 +6,16 @@ use std::{

 use anyhow::Context as _;
 use async_trait::async_trait;
-use prover_dal::{ConnectionPool, Prover, ProverDal};
 use tokio::{task::JoinHandle, time::sleep};
 use zksync_config::configs::FriWitnessVectorGeneratorConfig;
 use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
 use zksync_prover_fri_types::{
     circuit_definitions::boojum::field::goldilocks::GoldilocksField, CircuitWrapper, ProverJob,
     WitnessVectorArtifacts,
 };
 use zksync_prover_fri_utils::{
-    fetch_next_circuit, get_numeric_circuit_id, socket_utils::send_assembly,
+    fetch_next_circuit, get_numeric_circuit_id, region_fetcher::Zone, socket_utils::send_assembly,
 };
 use zksync_queued_job_processor::JobProcessor;
 use zksync_types::{
@@ -30,21 +30,24 @@ pub struct WitnessVectorGenerator {
     object_store: Arc<dyn ObjectStore>,
     pool: ConnectionPool<Prover>,
     circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>,
-    zone: String,
+    zone: Zone,
     config: FriWitnessVectorGeneratorConfig,
     protocol_version: ProtocolSemanticVersion,
     max_attempts: u32,
+    setup_data_path: Option<String>,
 }

 impl WitnessVectorGenerator {
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
         object_store: Arc<dyn ObjectStore>,
         prover_connection_pool: ConnectionPool<Prover>,
         circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>,
-        zone: String,
+        zone: Zone,
         config: FriWitnessVectorGeneratorConfig,
         protocol_version: ProtocolSemanticVersion,
         max_attempts: u32,
+        setup_data_path: Option<String>,
     ) -> Self {
         Self {
             object_store,
@@ -54,6 +57,7 @@ impl WitnessVectorGenerator {
             config,
             protocol_version,
             max_attempts,
+            setup_data_path,
         }
     }

@@ -116,10 +120,17 @@ impl JobProcessor for WitnessVectorGenerator {
         job: ProverJob,
         _started_at: Instant,
     ) -> JoinHandle<anyhow::Result<WitnessVectorArtifacts>> {
+        let setup_data_path = self.setup_data_path.clone();
+
         tokio::task::spawn_blocking(move || {
             let block_number = job.block_number;
             let _span = tracing::info_span!("witness_vector_generator", %block_number).entered();
-            Self::generate_witness_vector(job, &Keystore::default())
+            let keystore = if let Some(setup_data_path) = setup_data_path {
+                Keystore::new_with_setup_data_path(setup_data_path)
+            } else {
+                Keystore::default()
+            };
+            Self::generate_witness_vector(job, &keystore)
         })
     }

@@ -156,7 +167,7 @@ impl JobProcessor for WitnessVectorGenerator {
             .lock_available_prover(
                 self.config.max_prover_reservation_duration(),
                 self.config.specialized_group_id,
-                self.zone.clone(),
+                self.zone.to_string(),
                 self.protocol_version,
             )
             .await;
@@ -168,7 +179,8 @@ impl JobProcessor for WitnessVectorGenerator {
             now.elapsed()
         );
         let result = send_assembly(job_id, &serialized, &address);
-        handle_send_result(&result, job_id, &address, &self.pool, self.zone.clone()).await;
+        handle_send_result(&result, job_id, &address, &self.pool, self.zone.to_string())
+            .await;

         if result.is_ok() {
             METRICS.prover_waiting_time[&circuit_type].observe(now.elapsed());
diff --git a/prover/witness_vector_generator/src/lib.rs b/prover/crates/bin/witness_vector_generator/src/lib.rs
similarity index 50%
rename from prover/witness_vector_generator/src/lib.rs
rename to prover/crates/bin/witness_vector_generator/src/lib.rs
index d9d47d54897c..038b5f505b1d 100644
--- a/prover/witness_vector_generator/src/lib.rs
+++ b/prover/crates/bin/witness_vector_generator/src/lib.rs
@@ -1,3 +1,4 @@
+#![allow(incomplete_features)] // We have to use generic const exprs.
 #![feature(generic_const_exprs)]

 pub mod generator;
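In the witness generator's `main.rs` above, the binary prefers its own `prometheus_listener_port` and only falls back to the now-optional global prometheus config. The same selection logic in isolation, with simplified stand-in config types:

```rust
use anyhow::Context as _;

// Simplified stand-ins for the real config structs.
struct WitnessGeneratorConfig {
    prometheus_listener_port: Option<u16>,
}

#[derive(Clone)]
struct PrometheusConfig {
    listener_port: u16,
}

// Component-local port wins; the global config is only required when the
// component does not set one, so it can be absent otherwise.
fn listener_port(
    config: &WitnessGeneratorConfig,
    prometheus: Option<PrometheusConfig>,
) -> anyhow::Result<u16> {
    if let Some(port) = config.prometheus_listener_port {
        Ok(port)
    } else {
        Ok(prometheus.context("prometheus config")?.listener_port)
    }
}

fn main() -> anyhow::Result<()> {
    let cfg = WitnessGeneratorConfig {
        prometheus_listener_port: None,
    };
    let port = listener_port(&cfg, Some(PrometheusConfig { listener_port: 3312 }))?;
    println!("exporter on port {port}");
    Ok(())
}
```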
 #![feature(generic_const_exprs)]
 use std::time::Duration;

 use anyhow::Context as _;
-use prometheus_exporter::PrometheusExporterConfig;
-use prover_dal::ConnectionPool;
-use structopt::StructOpt;
+use clap::Parser;
 use tokio::sync::{oneshot, watch};
-use zksync_config::configs::{
-    fri_prover_group::FriProverGroupConfig, DatabaseSecrets, FriProverConfig,
-    FriWitnessVectorGeneratorConfig, ObservabilityConfig,
-};
-use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv};
+use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config};
+use zksync_env_config::object_store::ProverObjectStoreConfig;
 use zksync_object_store::ObjectStoreFactory;
+use zksync_prover_dal::ConnectionPool;
 use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION;
-use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::get_zone};
+use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::RegionFetcher};
 use zksync_queued_job_processor::JobProcessor;
 use zksync_utils::wait_for_tasks::ManagedTasks;
+use zksync_vlog::prometheus::PrometheusExporterConfig;

 use crate::generator::WitnessVectorGenerator;

 mod generator;
 mod metrics;

-#[derive(Debug, StructOpt)]
-#[structopt(
-    name = "zksync_witness_vector_generator",
-    about = "Tool for generating witness vectors for circuits"
-)]
-struct Opt {
+#[derive(Debug, Parser)]
+#[command(author = "Matter Labs", version)]
+struct Cli {
     /// Number of times `witness_vector_generator` should be run.
-    #[structopt(short = "n", long = "n_iterations")]
-    number_of_iterations: Option<usize>,
+    #[arg(long)]
+    #[arg(short)]
+    n_iterations: Option<usize>,
+    #[arg(long)]
+    pub(crate) config_path: Option<std::path::PathBuf>,
+    #[arg(long)]
+    pub(crate) secrets_path: Option<std::path::PathBuf>,
 }

 #[tokio::main]
 async fn main() -> anyhow::Result<()> {
-    let observability_config =
-        ObservabilityConfig::from_env().context("ObservabilityConfig::from_env()")?;
-    let log_format: vlog::LogFormat = observability_config
+    let opt = Cli::parse();
+
+    let general_config = load_general_config(opt.config_path).context("general config")?;
+    let database_secrets = load_database_secrets(opt.secrets_path).context("database secrets")?;
+
+    let observability_config = general_config
+        .observability
+        .context("observability config")?;
+    let log_format: zksync_vlog::LogFormat = observability_config
         .log_format
         .parse()
         .context("Invalid log format")?;
-    let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format);
+    let mut builder = zksync_vlog::ObservabilityBuilder::new().with_log_format(log_format);
     if let Some(sentry_url) = &observability_config.sentry_url {
         builder = builder
             .with_sentry_url(sentry_url)
@@ -61,31 +67,42 @@
     }
     let _guard = builder.build();

-    let opt = Opt::from_args();
-    let config = FriWitnessVectorGeneratorConfig::from_env()
-        .context("FriWitnessVectorGeneratorConfig::from_env()")?;
+    let config = general_config
+        .witness_vector_generator
+        .context("witness vector generator config")?;
     let specialized_group_id = config.specialized_group_id;
     let exporter_config = PrometheusExporterConfig::pull(config.prometheus_listener_port);

-    let database_secrets = DatabaseSecrets::from_env().context("DatabaseSecrets::from_env()")?;
     let pool = ConnectionPool::singleton(database_secrets.prover_url()?)
         .build()
         .await
         .context("failed to build a connection pool")?;
-    let object_store_config =
-        ProverObjectStoreConfig::from_env().context("ProverObjectStoreConfig::from_env()")?;
+    let object_store_config = ProverObjectStoreConfig(
+        general_config
+            .prover_config
+            .clone()
+            .context("prover config")?
+            .prover_object_store
+            .context("object store")?,
+    );
     let object_store = ObjectStoreFactory::new(object_store_config.0)
         .create_store()
         .await?;
-    let circuit_ids_for_round_to_be_proven = FriProverGroupConfig::from_env()
-        .context("FriProverGroupConfig::from_env()")?
+    let circuit_ids_for_round_to_be_proven = general_config
+        .prover_group_config
+        .expect("prover_group_config")
         .get_circuit_ids_for_group_id(specialized_group_id)
         .unwrap_or_default();
     let circuit_ids_for_round_to_be_proven =
         get_all_circuit_id_round_tuples_for(circuit_ids_for_round_to_be_proven);

-    let fri_prover_config = FriProverConfig::from_env().context("FriProverConfig::from_env()")?;
-    let zone_url = &fri_prover_config.zone_read_url;
-    let zone = get_zone(zone_url).await.context("get_zone()")?;
+    let prover_config = general_config.prover_config.context("prover config")?;
+    let zone = RegionFetcher::new(
+        prover_config.cloud_type,
+        prover_config.zone_read_url.clone(),
+    )
+    .get_zone()
+    .await
+    .context("get_zone()")?;

     let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION;

@@ -96,7 +113,8 @@ async fn main() -> anyhow::Result<()> {
         zone.clone(),
         config,
         protocol_version,
-        fri_prover_config.max_attempts,
+        prover_config.max_attempts,
+        Some(prover_config.setup_data_path.clone()),
     );

     let (stop_sender, stop_receiver) = watch::channel(false);
@@ -114,7 +132,7 @@

     let tasks = vec![
         tokio::spawn(exporter_config.run(stop_receiver.clone())),
-        tokio::spawn(witness_vector_generator.run(stop_receiver, opt.number_of_iterations)),
+        tokio::spawn(witness_vector_generator.run(stop_receiver, opt.n_iterations)),
     ];

     let mut tasks = ManagedTasks::new(tasks);
diff --git a/prover/witness_vector_generator/src/metrics.rs b/prover/crates/bin/witness_vector_generator/src/metrics.rs
similarity index 100%
rename from prover/witness_vector_generator/src/metrics.rs
rename to prover/crates/bin/witness_vector_generator/src/metrics.rs
diff --git a/prover/witness_vector_generator/tests/basic_test.rs b/prover/crates/bin/witness_vector_generator/tests/basic_test.rs
similarity index 100%
rename from prover/witness_vector_generator/tests/basic_test.rs
rename to prover/crates/bin/witness_vector_generator/tests/basic_test.rs
diff --git a/prover/witness_vector_generator/tests/data/base_layer_main_vm.bin b/prover/crates/bin/witness_vector_generator/tests/data/base_layer_main_vm.bin
similarity index 100%
rename from prover/witness_vector_generator/tests/data/base_layer_main_vm.bin
rename to prover/crates/bin/witness_vector_generator/tests/data/base_layer_main_vm.bin
diff --git a/prover/prover_dal/.sqlx/query-00b88ec7fcf40bb18e0018b7c76f6e1df560ab1e8935564355236e90b6147d2f.json b/prover/crates/lib/prover_dal/.sqlx/query-00b88ec7fcf40bb18e0018b7c76f6e1df560ab1e8935564355236e90b6147d2f.json
similarity index 100%
rename from prover/prover_dal/.sqlx/query-00b88ec7fcf40bb18e0018b7c76f6e1df560ab1e8935564355236e90b6147d2f.json
rename to prover/crates/lib/prover_dal/.sqlx/query-00b88ec7fcf40bb18e0018b7c76f6e1df560ab1e8935564355236e90b6147d2f.json
diff --git a/prover/prover_dal/.sqlx/query-02f2010c60dfa5b93d3f2ee7594579b23540815afa1c6a8d4c36bba951861fe7.json
b/prover/crates/lib/prover_dal/.sqlx/query-02f2010c60dfa5b93d3f2ee7594579b23540815afa1c6a8d4c36bba951861fe7.json similarity index 100% rename from prover/prover_dal/.sqlx/query-02f2010c60dfa5b93d3f2ee7594579b23540815afa1c6a8d4c36bba951861fe7.json rename to prover/crates/lib/prover_dal/.sqlx/query-02f2010c60dfa5b93d3f2ee7594579b23540815afa1c6a8d4c36bba951861fe7.json diff --git a/prover/prover_dal/.sqlx/query-069f04bdfafbe2e3628ac3ded93dab9b63eee7f21c450a723e4ba011edc8e2bb.json b/prover/crates/lib/prover_dal/.sqlx/query-069f04bdfafbe2e3628ac3ded93dab9b63eee7f21c450a723e4ba011edc8e2bb.json similarity index 100% rename from prover/prover_dal/.sqlx/query-069f04bdfafbe2e3628ac3ded93dab9b63eee7f21c450a723e4ba011edc8e2bb.json rename to prover/crates/lib/prover_dal/.sqlx/query-069f04bdfafbe2e3628ac3ded93dab9b63eee7f21c450a723e4ba011edc8e2bb.json diff --git a/prover/crates/lib/prover_dal/.sqlx/query-081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf.json b/prover/crates/lib/prover_dal/.sqlx/query-081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf.json new file mode 100644 index 000000000000..918fb2817d26 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n comp.l1_batch_number,\n (comp.updated_at - wit.created_at) AS time_taken,\n wit.created_at\n FROM\n proof_compression_jobs_fri AS comp\n JOIN witness_inputs_fri AS wit ON comp.l1_batch_number = wit.l1_batch_number\n WHERE\n wit.created_at > $1\n ORDER BY\n time_taken DESC;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "time_taken", + "type_info": "Interval" + }, + { + "ordinal": 2, + "name": "created_at", + "type_info": "Timestamp" + } + ], + "parameters": { + "Left": [ + "Timestamp" + ] + }, + "nullable": [ + false, + null, + false + ] + }, + "hash": "081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf" +} diff --git a/prover/prover_dal/.sqlx/query-0a1ec4690d6b4a67d6ad16badcbf113a19feb73c4cf9876855523499998b99c0.json b/prover/crates/lib/prover_dal/.sqlx/query-0a1ec4690d6b4a67d6ad16badcbf113a19feb73c4cf9876855523499998b99c0.json similarity index 100% rename from prover/prover_dal/.sqlx/query-0a1ec4690d6b4a67d6ad16badcbf113a19feb73c4cf9876855523499998b99c0.json rename to prover/crates/lib/prover_dal/.sqlx/query-0a1ec4690d6b4a67d6ad16badcbf113a19feb73c4cf9876855523499998b99c0.json diff --git a/prover/prover_dal/.sqlx/query-0b70c98c2edd8370ad09ac553c18dbc21cccb9a95e3db1c93da239845a5e9036.json b/prover/crates/lib/prover_dal/.sqlx/query-0b70c98c2edd8370ad09ac553c18dbc21cccb9a95e3db1c93da239845a5e9036.json similarity index 100% rename from prover/prover_dal/.sqlx/query-0b70c98c2edd8370ad09ac553c18dbc21cccb9a95e3db1c93da239845a5e9036.json rename to prover/crates/lib/prover_dal/.sqlx/query-0b70c98c2edd8370ad09ac553c18dbc21cccb9a95e3db1c93da239845a5e9036.json diff --git a/prover/prover_dal/.sqlx/query-16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838.json b/prover/crates/lib/prover_dal/.sqlx/query-16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838.json similarity index 100% rename from prover/prover_dal/.sqlx/query-16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838.json rename to prover/crates/lib/prover_dal/.sqlx/query-16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838.json diff --git 
a/prover/prover_dal/.sqlx/query-1849cfa3167eed2809e7724a63198f5e2450cc4faee2f80b37fbd5626324dbeb.json b/prover/crates/lib/prover_dal/.sqlx/query-1849cfa3167eed2809e7724a63198f5e2450cc4faee2f80b37fbd5626324dbeb.json similarity index 100% rename from prover/prover_dal/.sqlx/query-1849cfa3167eed2809e7724a63198f5e2450cc4faee2f80b37fbd5626324dbeb.json rename to prover/crates/lib/prover_dal/.sqlx/query-1849cfa3167eed2809e7724a63198f5e2450cc4faee2f80b37fbd5626324dbeb.json diff --git a/prover/prover_dal/.sqlx/query-2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3.json b/prover/crates/lib/prover_dal/.sqlx/query-2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3.json similarity index 100% rename from prover/prover_dal/.sqlx/query-2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3.json rename to prover/crates/lib/prover_dal/.sqlx/query-2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3.json diff --git a/prover/prover_dal/.sqlx/query-21621153e545859d71188e2421f5d2832571464e74b5fed92cf54617573c84ec.json b/prover/crates/lib/prover_dal/.sqlx/query-21621153e545859d71188e2421f5d2832571464e74b5fed92cf54617573c84ec.json similarity index 94% rename from prover/prover_dal/.sqlx/query-21621153e545859d71188e2421f5d2832571464e74b5fed92cf54617573c84ec.json rename to prover/crates/lib/prover_dal/.sqlx/query-21621153e545859d71188e2421f5d2832571464e74b5fed92cf54617573c84ec.json index e24d2c979a35..b5f056e1ecd9 100644 --- a/prover/prover_dal/.sqlx/query-21621153e545859d71188e2421f5d2832571464e74b5fed92cf54617573c84ec.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-21621153e545859d71188e2421f5d2832571464e74b5fed92cf54617573c84ec.json @@ -60,26 +60,21 @@ }, { "ordinal": 11, - "name": "is_blob_cleaned", - "type_info": "Bool" - }, - { - "ordinal": 12, "name": "number_of_basic_circuits", "type_info": "Int4" }, { - "ordinal": 13, + "ordinal": 12, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 14, + "ordinal": 13, "name": "picked_by", "type_info": "Text" }, { - "ordinal": 15, + "ordinal": 14, "name": "protocol_version_patch", "type_info": "Int4" } @@ -104,7 +99,6 @@ true, true, true, - true, false ] }, diff --git a/prover/prover_dal/.sqlx/query-28397b5a0b7af832d2a4d3d7011a68a48db6a64afcd41bbe0e17d98fa38fdb19.json b/prover/crates/lib/prover_dal/.sqlx/query-28397b5a0b7af832d2a4d3d7011a68a48db6a64afcd41bbe0e17d98fa38fdb19.json similarity index 100% rename from prover/prover_dal/.sqlx/query-28397b5a0b7af832d2a4d3d7011a68a48db6a64afcd41bbe0e17d98fa38fdb19.json rename to prover/crates/lib/prover_dal/.sqlx/query-28397b5a0b7af832d2a4d3d7011a68a48db6a64afcd41bbe0e17d98fa38fdb19.json diff --git a/prover/prover_dal/.sqlx/query-285d0ff850fa5c9af36564fcb14dd8547a1ad20492ec37c3c0be5639e5d49952.json b/prover/crates/lib/prover_dal/.sqlx/query-285d0ff850fa5c9af36564fcb14dd8547a1ad20492ec37c3c0be5639e5d49952.json similarity index 100% rename from prover/prover_dal/.sqlx/query-285d0ff850fa5c9af36564fcb14dd8547a1ad20492ec37c3c0be5639e5d49952.json rename to prover/crates/lib/prover_dal/.sqlx/query-285d0ff850fa5c9af36564fcb14dd8547a1ad20492ec37c3c0be5639e5d49952.json diff --git a/prover/prover_dal/.sqlx/query-28f03acf565c4b50fe86f606c18a8b699386b3c5b4e02d5ce046f0f2e0ddc388.json b/prover/crates/lib/prover_dal/.sqlx/query-28f03acf565c4b50fe86f606c18a8b699386b3c5b4e02d5ce046f0f2e0ddc388.json similarity index 100% rename from prover/prover_dal/.sqlx/query-28f03acf565c4b50fe86f606c18a8b699386b3c5b4e02d5ce046f0f2e0ddc388.json rename to 
prover/crates/lib/prover_dal/.sqlx/query-28f03acf565c4b50fe86f606c18a8b699386b3c5b4e02d5ce046f0f2e0ddc388.json diff --git a/prover/prover_dal/.sqlx/query-29ff260b02f7b955f9fe0b657b87def3a97275b66ad33d214054dc9048ddf584.json b/prover/crates/lib/prover_dal/.sqlx/query-29ff260b02f7b955f9fe0b657b87def3a97275b66ad33d214054dc9048ddf584.json similarity index 100% rename from prover/prover_dal/.sqlx/query-29ff260b02f7b955f9fe0b657b87def3a97275b66ad33d214054dc9048ddf584.json rename to prover/crates/lib/prover_dal/.sqlx/query-29ff260b02f7b955f9fe0b657b87def3a97275b66ad33d214054dc9048ddf584.json diff --git a/prover/prover_dal/.sqlx/query-2ab2f83b273c5aa88c1eefc8f70a8ea23052f714cd74c1d28ae1203ce8f0eaa9.json b/prover/crates/lib/prover_dal/.sqlx/query-2ab2f83b273c5aa88c1eefc8f70a8ea23052f714cd74c1d28ae1203ce8f0eaa9.json similarity index 100% rename from prover/prover_dal/.sqlx/query-2ab2f83b273c5aa88c1eefc8f70a8ea23052f714cd74c1d28ae1203ce8f0eaa9.json rename to prover/crates/lib/prover_dal/.sqlx/query-2ab2f83b273c5aa88c1eefc8f70a8ea23052f714cd74c1d28ae1203ce8f0eaa9.json diff --git a/prover/prover_dal/.sqlx/query-2b626262c8003817ee02978f77452554ccfb5b83f00efdc12bed0f60ef439785.json b/prover/crates/lib/prover_dal/.sqlx/query-2b626262c8003817ee02978f77452554ccfb5b83f00efdc12bed0f60ef439785.json similarity index 100% rename from prover/prover_dal/.sqlx/query-2b626262c8003817ee02978f77452554ccfb5b83f00efdc12bed0f60ef439785.json rename to prover/crates/lib/prover_dal/.sqlx/query-2b626262c8003817ee02978f77452554ccfb5b83f00efdc12bed0f60ef439785.json diff --git a/prover/prover_dal/.sqlx/query-2dc6b7bf08cced8791354fc47e319d03f894f40d2ec528382b5643c3d51ec8e7.json b/prover/crates/lib/prover_dal/.sqlx/query-2dc6b7bf08cced8791354fc47e319d03f894f40d2ec528382b5643c3d51ec8e7.json similarity index 100% rename from prover/prover_dal/.sqlx/query-2dc6b7bf08cced8791354fc47e319d03f894f40d2ec528382b5643c3d51ec8e7.json rename to prover/crates/lib/prover_dal/.sqlx/query-2dc6b7bf08cced8791354fc47e319d03f894f40d2ec528382b5643c3d51ec8e7.json diff --git a/prover/prover_dal/.sqlx/query-2df88abaae97b6f916b104375bd7249ec09c0daf4368021788207370213a6d94.json b/prover/crates/lib/prover_dal/.sqlx/query-2df88abaae97b6f916b104375bd7249ec09c0daf4368021788207370213a6d94.json similarity index 100% rename from prover/prover_dal/.sqlx/query-2df88abaae97b6f916b104375bd7249ec09c0daf4368021788207370213a6d94.json rename to prover/crates/lib/prover_dal/.sqlx/query-2df88abaae97b6f916b104375bd7249ec09c0daf4368021788207370213a6d94.json diff --git a/prover/prover_dal/.sqlx/query-2e6e2b68efc28114f44616b68fcf1d4d9a4b83a8b42846d8373ea13b96d612cf.json b/prover/crates/lib/prover_dal/.sqlx/query-2e6e2b68efc28114f44616b68fcf1d4d9a4b83a8b42846d8373ea13b96d612cf.json similarity index 100% rename from prover/prover_dal/.sqlx/query-2e6e2b68efc28114f44616b68fcf1d4d9a4b83a8b42846d8373ea13b96d612cf.json rename to prover/crates/lib/prover_dal/.sqlx/query-2e6e2b68efc28114f44616b68fcf1d4d9a4b83a8b42846d8373ea13b96d612cf.json diff --git a/prover/prover_dal/.sqlx/query-3902f6a8e09cd5ad560d23fe0269fd5b3d210a117bb0027d58c6cb4debd63f33.json b/prover/crates/lib/prover_dal/.sqlx/query-3902f6a8e09cd5ad560d23fe0269fd5b3d210a117bb0027d58c6cb4debd63f33.json similarity index 100% rename from prover/prover_dal/.sqlx/query-3902f6a8e09cd5ad560d23fe0269fd5b3d210a117bb0027d58c6cb4debd63f33.json rename to prover/crates/lib/prover_dal/.sqlx/query-3902f6a8e09cd5ad560d23fe0269fd5b3d210a117bb0027d58c6cb4debd63f33.json diff --git 
a/prover/crates/lib/prover_dal/.sqlx/query-3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d.json b/prover/crates/lib/prover_dal/.sqlx/query-3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d.json new file mode 100644 index 000000000000..d0dd5f6976b5 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 2\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "circuit_id", + "type_info": "Int2" + }, + { + "ordinal": 2, + "name": "depth", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d" +} diff --git a/prover/prover_dal/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json b/prover/crates/lib/prover_dal/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json similarity index 100% rename from prover/prover_dal/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json rename to prover/crates/lib/prover_dal/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json diff --git a/prover/prover_dal/.sqlx/query-3e0a1ebc684810c09ff83784bdd0ad195b0dd2a8ce56b1a9eb531103130b5e3e.json b/prover/crates/lib/prover_dal/.sqlx/query-3e0a1ebc684810c09ff83784bdd0ad195b0dd2a8ce56b1a9eb531103130b5e3e.json similarity index 100% rename from prover/prover_dal/.sqlx/query-3e0a1ebc684810c09ff83784bdd0ad195b0dd2a8ce56b1a9eb531103130b5e3e.json rename to prover/crates/lib/prover_dal/.sqlx/query-3e0a1ebc684810c09ff83784bdd0ad195b0dd2a8ce56b1a9eb531103130b5e3e.json diff --git a/prover/prover_dal/.sqlx/query-3ec365c5c81f4678a905ae5bbd48b87ead36f593488437c6f67da629ca81e4fa.json b/prover/crates/lib/prover_dal/.sqlx/query-3ec365c5c81f4678a905ae5bbd48b87ead36f593488437c6f67da629ca81e4fa.json similarity index 100% rename from prover/prover_dal/.sqlx/query-3ec365c5c81f4678a905ae5bbd48b87ead36f593488437c6f67da629ca81e4fa.json rename to prover/crates/lib/prover_dal/.sqlx/query-3ec365c5c81f4678a905ae5bbd48b87ead36f593488437c6f67da629ca81e4fa.json diff --git a/prover/prover_dal/.sqlx/query-412ef600a2f6025d8c22c2df8a497ed410fa47b268a66f1fc56d469c06ae50af.json b/prover/crates/lib/prover_dal/.sqlx/query-412ef600a2f6025d8c22c2df8a497ed410fa47b268a66f1fc56d469c06ae50af.json similarity index 100% rename from prover/prover_dal/.sqlx/query-412ef600a2f6025d8c22c2df8a497ed410fa47b268a66f1fc56d469c06ae50af.json rename to 
prover/crates/lib/prover_dal/.sqlx/query-412ef600a2f6025d8c22c2df8a497ed410fa47b268a66f1fc56d469c06ae50af.json diff --git a/prover/prover_dal/.sqlx/query-41af30620f8a1f20b8a6c46be162601d35fd2881ac1fd070f0f1a8add4bc388d.json b/prover/crates/lib/prover_dal/.sqlx/query-41af30620f8a1f20b8a6c46be162601d35fd2881ac1fd070f0f1a8add4bc388d.json similarity index 100% rename from prover/prover_dal/.sqlx/query-41af30620f8a1f20b8a6c46be162601d35fd2881ac1fd070f0f1a8add4bc388d.json rename to prover/crates/lib/prover_dal/.sqlx/query-41af30620f8a1f20b8a6c46be162601d35fd2881ac1fd070f0f1a8add4bc388d.json diff --git a/prover/prover_dal/.sqlx/query-46c4696fff5a4b8cc5cb46b05645da82065836fe17687ffad04126a6a8b2b27c.json b/prover/crates/lib/prover_dal/.sqlx/query-46c4696fff5a4b8cc5cb46b05645da82065836fe17687ffad04126a6a8b2b27c.json similarity index 100% rename from prover/prover_dal/.sqlx/query-46c4696fff5a4b8cc5cb46b05645da82065836fe17687ffad04126a6a8b2b27c.json rename to prover/crates/lib/prover_dal/.sqlx/query-46c4696fff5a4b8cc5cb46b05645da82065836fe17687ffad04126a6a8b2b27c.json diff --git a/prover/prover_dal/.sqlx/query-534822a226068cde83ad8c30b569a8f447824a5ab466bb6eea1710e8aeaa2c56.json b/prover/crates/lib/prover_dal/.sqlx/query-534822a226068cde83ad8c30b569a8f447824a5ab466bb6eea1710e8aeaa2c56.json similarity index 100% rename from prover/prover_dal/.sqlx/query-534822a226068cde83ad8c30b569a8f447824a5ab466bb6eea1710e8aeaa2c56.json rename to prover/crates/lib/prover_dal/.sqlx/query-534822a226068cde83ad8c30b569a8f447824a5ab466bb6eea1710e8aeaa2c56.json diff --git a/prover/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json b/prover/crates/lib/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json similarity index 100% rename from prover/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json rename to prover/crates/lib/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json diff --git a/prover/prover_dal/.sqlx/query-542af2ff4259182310363ac0213592895215e22fd4cf0dfe69b83277f8d05db3.json b/prover/crates/lib/prover_dal/.sqlx/query-542af2ff4259182310363ac0213592895215e22fd4cf0dfe69b83277f8d05db3.json similarity index 100% rename from prover/prover_dal/.sqlx/query-542af2ff4259182310363ac0213592895215e22fd4cf0dfe69b83277f8d05db3.json rename to prover/crates/lib/prover_dal/.sqlx/query-542af2ff4259182310363ac0213592895215e22fd4cf0dfe69b83277f8d05db3.json diff --git a/prover/prover_dal/.sqlx/query-5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9.json b/prover/crates/lib/prover_dal/.sqlx/query-5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9.json similarity index 100% rename from prover/prover_dal/.sqlx/query-5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9.json rename to prover/crates/lib/prover_dal/.sqlx/query-5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9.json diff --git a/prover/prover_dal/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json b/prover/crates/lib/prover_dal/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json similarity index 100% rename from prover/prover_dal/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json rename to prover/crates/lib/prover_dal/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json diff --git 
a/prover/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json b/prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json similarity index 100% rename from prover/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json rename to prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json diff --git a/prover/prover_dal/.sqlx/query-67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json b/prover/crates/lib/prover_dal/.sqlx/query-67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json similarity index 100% rename from prover/prover_dal/.sqlx/query-67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json rename to prover/crates/lib/prover_dal/.sqlx/query-67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json diff --git a/prover/prover_dal/.sqlx/query-6cfc59d2fc039c706f30ae91b7d9d0c658093dede5eb61489205aa751ad5b8ec.json b/prover/crates/lib/prover_dal/.sqlx/query-6cfc59d2fc039c706f30ae91b7d9d0c658093dede5eb61489205aa751ad5b8ec.json similarity index 100% rename from prover/prover_dal/.sqlx/query-6cfc59d2fc039c706f30ae91b7d9d0c658093dede5eb61489205aa751ad5b8ec.json rename to prover/crates/lib/prover_dal/.sqlx/query-6cfc59d2fc039c706f30ae91b7d9d0c658093dede5eb61489205aa751ad5b8ec.json diff --git a/prover/prover_dal/.sqlx/query-6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352.json b/prover/crates/lib/prover_dal/.sqlx/query-6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352.json similarity index 100% rename from prover/prover_dal/.sqlx/query-6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352.json rename to prover/crates/lib/prover_dal/.sqlx/query-6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352.json diff --git a/prover/prover_dal/.sqlx/query-764693ceeb45f8478a20242b592d419667f11d80036cda021ecbf23b0b5f7f42.json b/prover/crates/lib/prover_dal/.sqlx/query-764693ceeb45f8478a20242b592d419667f11d80036cda021ecbf23b0b5f7f42.json similarity index 100% rename from prover/prover_dal/.sqlx/query-764693ceeb45f8478a20242b592d419667f11d80036cda021ecbf23b0b5f7f42.json rename to prover/crates/lib/prover_dal/.sqlx/query-764693ceeb45f8478a20242b592d419667f11d80036cda021ecbf23b0b5f7f42.json diff --git a/prover/prover_dal/.sqlx/query-7a2145e2234a7896031bbc1ce82715e903f3b399886c2c73e838bd924fed6776.json b/prover/crates/lib/prover_dal/.sqlx/query-7a2145e2234a7896031bbc1ce82715e903f3b399886c2c73e838bd924fed6776.json similarity index 100% rename from prover/prover_dal/.sqlx/query-7a2145e2234a7896031bbc1ce82715e903f3b399886c2c73e838bd924fed6776.json rename to prover/crates/lib/prover_dal/.sqlx/query-7a2145e2234a7896031bbc1ce82715e903f3b399886c2c73e838bd924fed6776.json diff --git a/prover/prover_dal/.sqlx/query-7effbacbdcc4bd762386351755f4f32042dfead8a37401558f5fd3b03480f2dd.json b/prover/crates/lib/prover_dal/.sqlx/query-7effbacbdcc4bd762386351755f4f32042dfead8a37401558f5fd3b03480f2dd.json similarity index 100% rename from prover/prover_dal/.sqlx/query-7effbacbdcc4bd762386351755f4f32042dfead8a37401558f5fd3b03480f2dd.json rename to prover/crates/lib/prover_dal/.sqlx/query-7effbacbdcc4bd762386351755f4f32042dfead8a37401558f5fd3b03480f2dd.json diff --git a/prover/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json 
b/prover/crates/lib/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json similarity index 100% rename from prover/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json rename to prover/crates/lib/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json diff --git a/prover/prover_dal/.sqlx/query-85a69b433c08847876bf6e7af9bc39ae8a6e053a0e03afd3fb5e02ee17157067.json b/prover/crates/lib/prover_dal/.sqlx/query-85a69b433c08847876bf6e7af9bc39ae8a6e053a0e03afd3fb5e02ee17157067.json similarity index 100% rename from prover/prover_dal/.sqlx/query-85a69b433c08847876bf6e7af9bc39ae8a6e053a0e03afd3fb5e02ee17157067.json rename to prover/crates/lib/prover_dal/.sqlx/query-85a69b433c08847876bf6e7af9bc39ae8a6e053a0e03afd3fb5e02ee17157067.json diff --git a/prover/prover_dal/.sqlx/query-860846c9bcad1edd1a2906542c178815e29440592b2bb00adacf02730b526458.json b/prover/crates/lib/prover_dal/.sqlx/query-860846c9bcad1edd1a2906542c178815e29440592b2bb00adacf02730b526458.json similarity index 100% rename from prover/prover_dal/.sqlx/query-860846c9bcad1edd1a2906542c178815e29440592b2bb00adacf02730b526458.json rename to prover/crates/lib/prover_dal/.sqlx/query-860846c9bcad1edd1a2906542c178815e29440592b2bb00adacf02730b526458.json diff --git a/prover/prover_dal/.sqlx/query-8719c090a9ad2488d556e495238cdce6412e2725cf5162ce7a733f6dceaecb11.json b/prover/crates/lib/prover_dal/.sqlx/query-8719c090a9ad2488d556e495238cdce6412e2725cf5162ce7a733f6dceaecb11.json similarity index 100% rename from prover/prover_dal/.sqlx/query-8719c090a9ad2488d556e495238cdce6412e2725cf5162ce7a733f6dceaecb11.json rename to prover/crates/lib/prover_dal/.sqlx/query-8719c090a9ad2488d556e495238cdce6412e2725cf5162ce7a733f6dceaecb11.json diff --git a/prover/prover_dal/.sqlx/query-8720d411e0c9640afd61e927a89c0b6c018e6a4d279acd24a4ea7d81b5cc5123.json b/prover/crates/lib/prover_dal/.sqlx/query-8720d411e0c9640afd61e927a89c0b6c018e6a4d279acd24a4ea7d81b5cc5123.json similarity index 100% rename from prover/prover_dal/.sqlx/query-8720d411e0c9640afd61e927a89c0b6c018e6a4d279acd24a4ea7d81b5cc5123.json rename to prover/crates/lib/prover_dal/.sqlx/query-8720d411e0c9640afd61e927a89c0b6c018e6a4d279acd24a4ea7d81b5cc5123.json diff --git a/prover/prover_dal/.sqlx/query-87a73aa95a85efeb065428f9e56e085ea80cf93c2fd66fd3949aab428bbdc560.json b/prover/crates/lib/prover_dal/.sqlx/query-87a73aa95a85efeb065428f9e56e085ea80cf93c2fd66fd3949aab428bbdc560.json similarity index 100% rename from prover/prover_dal/.sqlx/query-87a73aa95a85efeb065428f9e56e085ea80cf93c2fd66fd3949aab428bbdc560.json rename to prover/crates/lib/prover_dal/.sqlx/query-87a73aa95a85efeb065428f9e56e085ea80cf93c2fd66fd3949aab428bbdc560.json diff --git a/prover/prover_dal/.sqlx/query-8bcad2be3dd29e36ea731417b68023678f31a1b7f5ee33b643dd551c40e88329.json b/prover/crates/lib/prover_dal/.sqlx/query-8bcad2be3dd29e36ea731417b68023678f31a1b7f5ee33b643dd551c40e88329.json similarity index 100% rename from prover/prover_dal/.sqlx/query-8bcad2be3dd29e36ea731417b68023678f31a1b7f5ee33b643dd551c40e88329.json rename to prover/crates/lib/prover_dal/.sqlx/query-8bcad2be3dd29e36ea731417b68023678f31a1b7f5ee33b643dd551c40e88329.json diff --git a/prover/prover_dal/.sqlx/query-8ffb62f6a17c68af701e790044989daacb88fe5aaf368c5f81a885821522b99c.json b/prover/crates/lib/prover_dal/.sqlx/query-8ffb62f6a17c68af701e790044989daacb88fe5aaf368c5f81a885821522b99c.json similarity index 100% rename from 
prover/prover_dal/.sqlx/query-8ffb62f6a17c68af701e790044989daacb88fe5aaf368c5f81a885821522b99c.json rename to prover/crates/lib/prover_dal/.sqlx/query-8ffb62f6a17c68af701e790044989daacb88fe5aaf368c5f81a885821522b99c.json diff --git a/prover/crates/lib/prover_dal/.sqlx/query-929419ad8dcc70e8ce986f17075cd21d4645563f93afd4428734196c2b212276.json b/prover/crates/lib/prover_dal/.sqlx/query-929419ad8dcc70e8ce986f17075cd21d4645563f93afd4428734196c2b212276.json new file mode 100644 index 000000000000..cf5fe8117b14 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-929419ad8dcc70e8ce986f17075cd21d4645563f93afd4428734196c2b212276.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n witness_inputs_fri (\n l1_batch_number,\n witness_inputs_blob_url,\n protocol_version,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, 'queued', NOW(), NOW(), $4)\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "929419ad8dcc70e8ce986f17075cd21d4645563f93afd4428734196c2b212276" +} diff --git a/prover/prover_dal/.sqlx/query-93b9706aa8eb840d574d7c156cc866e8f67a380302762c272bfb27307682d62e.json b/prover/crates/lib/prover_dal/.sqlx/query-93b9706aa8eb840d574d7c156cc866e8f67a380302762c272bfb27307682d62e.json similarity index 100% rename from prover/prover_dal/.sqlx/query-93b9706aa8eb840d574d7c156cc866e8f67a380302762c272bfb27307682d62e.json rename to prover/crates/lib/prover_dal/.sqlx/query-93b9706aa8eb840d574d7c156cc866e8f67a380302762c272bfb27307682d62e.json diff --git a/prover/prover_dal/.sqlx/query-94a75b05ecbab75d6ebf39cca029bfb838c787fc58d7536f9e9976e5e515431a.json b/prover/crates/lib/prover_dal/.sqlx/query-94a75b05ecbab75d6ebf39cca029bfb838c787fc58d7536f9e9976e5e515431a.json similarity index 100% rename from prover/prover_dal/.sqlx/query-94a75b05ecbab75d6ebf39cca029bfb838c787fc58d7536f9e9976e5e515431a.json rename to prover/crates/lib/prover_dal/.sqlx/query-94a75b05ecbab75d6ebf39cca029bfb838c787fc58d7536f9e9976e5e515431a.json diff --git a/prover/prover_dal/.sqlx/query-9b713312b539b4eefa58346f0070767a2cd4488e670674cd9149f7a332c0198d.json b/prover/crates/lib/prover_dal/.sqlx/query-9b713312b539b4eefa58346f0070767a2cd4488e670674cd9149f7a332c0198d.json similarity index 100% rename from prover/prover_dal/.sqlx/query-9b713312b539b4eefa58346f0070767a2cd4488e670674cd9149f7a332c0198d.json rename to prover/crates/lib/prover_dal/.sqlx/query-9b713312b539b4eefa58346f0070767a2cd4488e670674cd9149f7a332c0198d.json diff --git a/prover/prover_dal/.sqlx/query-a0f60a97f09b2467ca73bb6fbebb210d65149cdd4a3411a79b717aadbffb43af.json b/prover/crates/lib/prover_dal/.sqlx/query-a0f60a97f09b2467ca73bb6fbebb210d65149cdd4a3411a79b717aadbffb43af.json similarity index 100% rename from prover/prover_dal/.sqlx/query-a0f60a97f09b2467ca73bb6fbebb210d65149cdd4a3411a79b717aadbffb43af.json rename to prover/crates/lib/prover_dal/.sqlx/query-a0f60a97f09b2467ca73bb6fbebb210d65149cdd4a3411a79b717aadbffb43af.json diff --git a/prover/prover_dal/.sqlx/query-a84ee70bec8c03bd51e1c6bad44c9a64904026506914abae2946e5d353d6a604.json b/prover/crates/lib/prover_dal/.sqlx/query-a84ee70bec8c03bd51e1c6bad44c9a64904026506914abae2946e5d353d6a604.json similarity index 100% rename from prover/prover_dal/.sqlx/query-a84ee70bec8c03bd51e1c6bad44c9a64904026506914abae2946e5d353d6a604.json rename to 
prover/crates/lib/prover_dal/.sqlx/query-a84ee70bec8c03bd51e1c6bad44c9a64904026506914abae2946e5d353d6a604.json diff --git a/prover/prover_dal/.sqlx/query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json b/prover/crates/lib/prover_dal/.sqlx/query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json similarity index 100% rename from prover/prover_dal/.sqlx/query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json rename to prover/crates/lib/prover_dal/.sqlx/query-a94fffdbc1827dc5df908ea1e99ef3ad13840d2c497760e9bd0513f68dc4271c.json diff --git a/prover/crates/lib/prover_dal/.sqlx/query-abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9.json b/prover/crates/lib/prover_dal/.sqlx/query-abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9.json new file mode 100644 index 000000000000..fae5c1041a5d --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 1\n AND prover_jobs_fri.depth = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "circuit_id", + "type_info": "Int2" + }, + { + "ordinal": 2, + "name": "depth", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9" +} diff --git a/prover/prover_dal/.sqlx/query-b25c66b9705b3f2fb8a3492f1bd20222e177262292241bd8cb89dbb9c1e74c2d.json b/prover/crates/lib/prover_dal/.sqlx/query-b25c66b9705b3f2fb8a3492f1bd20222e177262292241bd8cb89dbb9c1e74c2d.json similarity index 100% rename from prover/prover_dal/.sqlx/query-b25c66b9705b3f2fb8a3492f1bd20222e177262292241bd8cb89dbb9c1e74c2d.json rename to prover/crates/lib/prover_dal/.sqlx/query-b25c66b9705b3f2fb8a3492f1bd20222e177262292241bd8cb89dbb9c1e74c2d.json diff --git a/prover/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json b/prover/crates/lib/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json similarity index 100% rename from prover/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json rename to prover/crates/lib/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json diff --git a/prover/prover_dal/.sqlx/query-b367ecb1ebee86ec598c4079591f8c12deeca6b8843fe3869cc2b02b30da5de6.json b/prover/crates/lib/prover_dal/.sqlx/query-b367ecb1ebee86ec598c4079591f8c12deeca6b8843fe3869cc2b02b30da5de6.json similarity index 100% 
rename from prover/prover_dal/.sqlx/query-b367ecb1ebee86ec598c4079591f8c12deeca6b8843fe3869cc2b02b30da5de6.json rename to prover/crates/lib/prover_dal/.sqlx/query-b367ecb1ebee86ec598c4079591f8c12deeca6b8843fe3869cc2b02b30da5de6.json diff --git a/prover/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json b/prover/crates/lib/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json similarity index 100% rename from prover/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json rename to prover/crates/lib/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json diff --git a/prover/prover_dal/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json b/prover/crates/lib/prover_dal/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json similarity index 100% rename from prover/prover_dal/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json rename to prover/crates/lib/prover_dal/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json diff --git a/prover/prover_dal/.sqlx/query-b568f9cb9c2bd53b5dcde15f368a9dc31c7d51476f18cffa80cad653298ad252.json b/prover/crates/lib/prover_dal/.sqlx/query-b568f9cb9c2bd53b5dcde15f368a9dc31c7d51476f18cffa80cad653298ad252.json similarity index 100% rename from prover/prover_dal/.sqlx/query-b568f9cb9c2bd53b5dcde15f368a9dc31c7d51476f18cffa80cad653298ad252.json rename to prover/crates/lib/prover_dal/.sqlx/query-b568f9cb9c2bd53b5dcde15f368a9dc31c7d51476f18cffa80cad653298ad252.json diff --git a/prover/prover_dal/.sqlx/query-bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5.json b/prover/crates/lib/prover_dal/.sqlx/query-bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5.json similarity index 100% rename from prover/prover_dal/.sqlx/query-bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5.json rename to prover/crates/lib/prover_dal/.sqlx/query-bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5.json diff --git a/prover/prover_dal/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json b/prover/crates/lib/prover_dal/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json similarity index 100% rename from prover/prover_dal/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json rename to prover/crates/lib/prover_dal/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json diff --git a/prover/prover_dal/.sqlx/query-c156004a0e5ad5bcc33d3b894fd69718349ac4fc08b455c7f4265d7443f2ec13.json b/prover/crates/lib/prover_dal/.sqlx/query-c156004a0e5ad5bcc33d3b894fd69718349ac4fc08b455c7f4265d7443f2ec13.json similarity index 100% rename from prover/prover_dal/.sqlx/query-c156004a0e5ad5bcc33d3b894fd69718349ac4fc08b455c7f4265d7443f2ec13.json rename to prover/crates/lib/prover_dal/.sqlx/query-c156004a0e5ad5bcc33d3b894fd69718349ac4fc08b455c7f4265d7443f2ec13.json diff --git a/prover/prover_dal/.sqlx/query-c173743af526d8150b6091ea52e6997fcfbc7ad688f2eee3dfab1029344d2382.json b/prover/crates/lib/prover_dal/.sqlx/query-c173743af526d8150b6091ea52e6997fcfbc7ad688f2eee3dfab1029344d2382.json similarity index 100% rename from prover/prover_dal/.sqlx/query-c173743af526d8150b6091ea52e6997fcfbc7ad688f2eee3dfab1029344d2382.json rename to 
prover/crates/lib/prover_dal/.sqlx/query-c173743af526d8150b6091ea52e6997fcfbc7ad688f2eee3dfab1029344d2382.json diff --git a/prover/prover_dal/.sqlx/query-c2c140d136df5303d7b3a66ccd0d34a5baece02812f8c950fc84d37eeebd33a4.json b/prover/crates/lib/prover_dal/.sqlx/query-c2c140d136df5303d7b3a66ccd0d34a5baece02812f8c950fc84d37eeebd33a4.json similarity index 95% rename from prover/prover_dal/.sqlx/query-c2c140d136df5303d7b3a66ccd0d34a5baece02812f8c950fc84d37eeebd33a4.json rename to prover/crates/lib/prover_dal/.sqlx/query-c2c140d136df5303d7b3a66ccd0d34a5baece02812f8c950fc84d37eeebd33a4.json index 007525bceaef..25a49e191f6e 100644 --- a/prover/prover_dal/.sqlx/query-c2c140d136df5303d7b3a66ccd0d34a5baece02812f8c950fc84d37eeebd33a4.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-c2c140d136df5303d7b3a66ccd0d34a5baece02812f8c950fc84d37eeebd33a4.json @@ -70,36 +70,31 @@ }, { "ordinal": 13, - "name": "is_blob_cleaned", - "type_info": "Bool" - }, - { - "ordinal": 14, "name": "depth", "type_info": "Int4" }, { - "ordinal": 15, + "ordinal": 14, "name": "is_node_final_proof", "type_info": "Bool" }, { - "ordinal": 16, + "ordinal": 15, "name": "proof_blob_url", "type_info": "Text" }, { - "ordinal": 17, + "ordinal": 16, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 18, + "ordinal": 17, "name": "picked_by", "type_info": "Text" }, { - "ordinal": 19, + "ordinal": 18, "name": "protocol_version_patch", "type_info": "Int4" } @@ -124,7 +119,6 @@ false, false, true, - true, false, false, true, diff --git a/prover/prover_dal/.sqlx/query-c340c043c938bf5f4b63d57a1654775c6f7414c7bed75d33b61de00fdbabc349.json b/prover/crates/lib/prover_dal/.sqlx/query-c340c043c938bf5f4b63d57a1654775c6f7414c7bed75d33b61de00fdbabc349.json similarity index 100% rename from prover/prover_dal/.sqlx/query-c340c043c938bf5f4b63d57a1654775c6f7414c7bed75d33b61de00fdbabc349.json rename to prover/crates/lib/prover_dal/.sqlx/query-c340c043c938bf5f4b63d57a1654775c6f7414c7bed75d33b61de00fdbabc349.json diff --git a/prover/prover_dal/.sqlx/query-c706a49ff54f6b424e24d061fe7ac429aac3c030f7e226a1264243d8cdae038d.json b/prover/crates/lib/prover_dal/.sqlx/query-c706a49ff54f6b424e24d061fe7ac429aac3c030f7e226a1264243d8cdae038d.json similarity index 100% rename from prover/prover_dal/.sqlx/query-c706a49ff54f6b424e24d061fe7ac429aac3c030f7e226a1264243d8cdae038d.json rename to prover/crates/lib/prover_dal/.sqlx/query-c706a49ff54f6b424e24d061fe7ac429aac3c030f7e226a1264243d8cdae038d.json diff --git a/prover/prover_dal/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json b/prover/crates/lib/prover_dal/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json similarity index 100% rename from prover/prover_dal/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json rename to prover/crates/lib/prover_dal/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json diff --git a/prover/prover_dal/.sqlx/query-caff620ae66d7fbe3caff7505173b6da86d1e693be03936730c340121167341f.json b/prover/crates/lib/prover_dal/.sqlx/query-caff620ae66d7fbe3caff7505173b6da86d1e693be03936730c340121167341f.json similarity index 100% rename from prover/prover_dal/.sqlx/query-caff620ae66d7fbe3caff7505173b6da86d1e693be03936730c340121167341f.json rename to prover/crates/lib/prover_dal/.sqlx/query-caff620ae66d7fbe3caff7505173b6da86d1e693be03936730c340121167341f.json diff --git a/prover/prover_dal/.sqlx/query-ce5779092feb8a3d3e2c5e395783e67f08f2ead5f55bfb6594e50346bf9cf2ef.json 
b/prover/crates/lib/prover_dal/.sqlx/query-ce5779092feb8a3d3e2c5e395783e67f08f2ead5f55bfb6594e50346bf9cf2ef.json similarity index 100% rename from prover/prover_dal/.sqlx/query-ce5779092feb8a3d3e2c5e395783e67f08f2ead5f55bfb6594e50346bf9cf2ef.json rename to prover/crates/lib/prover_dal/.sqlx/query-ce5779092feb8a3d3e2c5e395783e67f08f2ead5f55bfb6594e50346bf9cf2ef.json diff --git a/prover/prover_dal/.sqlx/query-d16278c6025eb3a205266fb5273f029e262be45614404159908af1624349700b.json b/prover/crates/lib/prover_dal/.sqlx/query-d16278c6025eb3a205266fb5273f029e262be45614404159908af1624349700b.json similarity index 100% rename from prover/prover_dal/.sqlx/query-d16278c6025eb3a205266fb5273f029e262be45614404159908af1624349700b.json rename to prover/crates/lib/prover_dal/.sqlx/query-d16278c6025eb3a205266fb5273f029e262be45614404159908af1624349700b.json diff --git a/prover/prover_dal/.sqlx/query-d272c91f1209c277189a31c59ee191a43dc8eafc33ee067bd41e20f25f7625f0.json b/prover/crates/lib/prover_dal/.sqlx/query-d272c91f1209c277189a31c59ee191a43dc8eafc33ee067bd41e20f25f7625f0.json similarity index 96% rename from prover/prover_dal/.sqlx/query-d272c91f1209c277189a31c59ee191a43dc8eafc33ee067bd41e20f25f7625f0.json rename to prover/crates/lib/prover_dal/.sqlx/query-d272c91f1209c277189a31c59ee191a43dc8eafc33ee067bd41e20f25f7625f0.json index a90da33a3333..2c94853eacff 100644 --- a/prover/prover_dal/.sqlx/query-d272c91f1209c277189a31c59ee191a43dc8eafc33ee067bd41e20f25f7625f0.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-d272c91f1209c277189a31c59ee191a43dc8eafc33ee067bd41e20f25f7625f0.json @@ -60,26 +60,21 @@ }, { "ordinal": 11, - "name": "is_blob_cleaned", - "type_info": "Bool" - }, - { - "ordinal": 12, "name": "number_of_basic_circuits", "type_info": "Int4" }, { - "ordinal": 13, + "ordinal": 12, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 14, + "ordinal": 13, "name": "picked_by", "type_info": "Text" }, { - "ordinal": 15, + "ordinal": 14, "name": "protocol_version_patch", "type_info": "Int4" } @@ -106,7 +101,6 @@ true, true, true, - true, false ] }, diff --git a/prover/prover_dal/.sqlx/query-d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed.json b/prover/crates/lib/prover_dal/.sqlx/query-d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed.json similarity index 100% rename from prover/prover_dal/.sqlx/query-d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed.json rename to prover/crates/lib/prover_dal/.sqlx/query-d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed.json diff --git a/prover/crates/lib/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json b/prover/crates/lib/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json new file mode 100644 index 000000000000..c353ecf1bad3 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n witness_inputs_fri\n WHERE\n l1_batch_number <= $1\n AND status = 'queued'\n AND protocol_version = $2\n AND protocol_version_patch = $4\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n witness_inputs_fri.l1_batch_number\n ", + "describe": { 
+ "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Text", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48" +} diff --git a/prover/prover_dal/.sqlx/query-db3e74f0e83ffbf84a6d61e560f2060fbea775dc185f639139fbfd23e4d5f3c6.json b/prover/crates/lib/prover_dal/.sqlx/query-db3e74f0e83ffbf84a6d61e560f2060fbea775dc185f639139fbfd23e4d5f3c6.json similarity index 100% rename from prover/prover_dal/.sqlx/query-db3e74f0e83ffbf84a6d61e560f2060fbea775dc185f639139fbfd23e4d5f3c6.json rename to prover/crates/lib/prover_dal/.sqlx/query-db3e74f0e83ffbf84a6d61e560f2060fbea775dc185f639139fbfd23e4d5f3c6.json diff --git a/prover/prover_dal/.sqlx/query-df00e33809768120e395d8f740770a4e629b2a1cde641e74e4e55bb100df809f.json b/prover/crates/lib/prover_dal/.sqlx/query-df00e33809768120e395d8f740770a4e629b2a1cde641e74e4e55bb100df809f.json similarity index 100% rename from prover/prover_dal/.sqlx/query-df00e33809768120e395d8f740770a4e629b2a1cde641e74e4e55bb100df809f.json rename to prover/crates/lib/prover_dal/.sqlx/query-df00e33809768120e395d8f740770a4e629b2a1cde641e74e4e55bb100df809f.json diff --git a/prover/prover_dal/.sqlx/query-e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58.json b/prover/crates/lib/prover_dal/.sqlx/query-e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58.json similarity index 85% rename from prover/prover_dal/.sqlx/query-e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58.json rename to prover/crates/lib/prover_dal/.sqlx/query-e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58.json index 738a8b54a0b3..7786dc04a2e7 100644 --- a/prover/prover_dal/.sqlx/query-e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58.json @@ -10,68 +10,58 @@ }, { "ordinal": 1, - "name": "merkle_tree_paths_blob_url", - "type_info": "Text" - }, - { - "ordinal": 2, "name": "attempts", "type_info": "Int2" }, { - "ordinal": 3, + "ordinal": 2, "name": "status", "type_info": "Text" }, { - "ordinal": 4, + "ordinal": 3, "name": "error", "type_info": "Text" }, { - "ordinal": 5, + "ordinal": 4, "name": "created_at", "type_info": "Timestamp" }, { - "ordinal": 6, + "ordinal": 5, "name": "updated_at", "type_info": "Timestamp" }, { - "ordinal": 7, + "ordinal": 6, "name": "processing_started_at", "type_info": "Timestamp" }, { - "ordinal": 8, + "ordinal": 7, "name": "time_taken", "type_info": "Time" }, { - "ordinal": 9, - "name": "is_blob_cleaned", - "type_info": "Bool" - }, - { - "ordinal": 10, + "ordinal": 8, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 11, + "ordinal": 9, "name": "picked_by", "type_info": "Text" }, { - "ordinal": 12, - "name": "eip_4844_blobs", - "type_info": "Bytea" - }, - { - "ordinal": 13, + "ordinal": 10, "name": "protocol_version_patch", "type_info": "Int4" + }, + { + "ordinal": 11, + "name": "witness_inputs_blob_url", + "type_info": "Text" } ], "parameters": { @@ -81,7 +71,6 @@ }, "nullable": [ false, - true, false, false, true, @@ -91,9 +80,8 @@ true, true, true, - true, - true, - false + false, + true ] }, "hash": "e0a6cc885e437aa7ded9def71f3e118cabc67b6e507efefb7b69e102f1b43c58" diff --git a/prover/prover_dal/.sqlx/query-e3194873d24e67f8d0e98bf8bf2d4f9a3b98458746972c9860fb9473947d59ff.json 
b/prover/crates/lib/prover_dal/.sqlx/query-e3194873d24e67f8d0e98bf8bf2d4f9a3b98458746972c9860fb9473947d59ff.json similarity index 100% rename from prover/prover_dal/.sqlx/query-e3194873d24e67f8d0e98bf8bf2d4f9a3b98458746972c9860fb9473947d59ff.json rename to prover/crates/lib/prover_dal/.sqlx/query-e3194873d24e67f8d0e98bf8bf2d4f9a3b98458746972c9860fb9473947d59ff.json diff --git a/prover/prover_dal/.sqlx/query-e32c0d85cb2841efb0b7cea6b049bae42849574731d33539bfdcca21c9b64f4e.json b/prover/crates/lib/prover_dal/.sqlx/query-e32c0d85cb2841efb0b7cea6b049bae42849574731d33539bfdcca21c9b64f4e.json similarity index 100% rename from prover/prover_dal/.sqlx/query-e32c0d85cb2841efb0b7cea6b049bae42849574731d33539bfdcca21c9b64f4e.json rename to prover/crates/lib/prover_dal/.sqlx/query-e32c0d85cb2841efb0b7cea6b049bae42849574731d33539bfdcca21c9b64f4e.json diff --git a/prover/prover_dal/.sqlx/query-e495b78add1c942d89d806e228093a4eb2ee0284aa89bca1ba958f470a2d6254.json b/prover/crates/lib/prover_dal/.sqlx/query-e495b78add1c942d89d806e228093a4eb2ee0284aa89bca1ba958f470a2d6254.json similarity index 100% rename from prover/prover_dal/.sqlx/query-e495b78add1c942d89d806e228093a4eb2ee0284aa89bca1ba958f470a2d6254.json rename to prover/crates/lib/prover_dal/.sqlx/query-e495b78add1c942d89d806e228093a4eb2ee0284aa89bca1ba958f470a2d6254.json diff --git a/prover/crates/lib/prover_dal/.sqlx/query-e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json b/prover/crates/lib/prover_dal/.sqlx/query-e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json new file mode 100644 index 000000000000..af6210ae91e4 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id\n FROM\n prover_jobs_fri\n JOIN leaf_aggregation_witness_jobs_fri lawj ON prover_jobs_fri.l1_batch_number = lawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = lawj.circuit_id\n WHERE\n lawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n lawj.number_of_basic_circuits\n HAVING\n COUNT(*) = lawj.number_of_basic_circuits\n )\n RETURNING\n l1_batch_number,\n circuit_id;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "circuit_id", + "type_info": "Int2" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false + ] + }, + "hash": "e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849" +} diff --git a/prover/prover_dal/.sqlx/query-e8066db420e075306235f728d57567878f347bdaf36294e9b24ee9c0aa1e861b.json b/prover/crates/lib/prover_dal/.sqlx/query-e8066db420e075306235f728d57567878f347bdaf36294e9b24ee9c0aa1e861b.json similarity index 100% rename from prover/prover_dal/.sqlx/query-e8066db420e075306235f728d57567878f347bdaf36294e9b24ee9c0aa1e861b.json rename to prover/crates/lib/prover_dal/.sqlx/query-e8066db420e075306235f728d57567878f347bdaf36294e9b24ee9c0aa1e861b.json diff --git a/prover/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json 
b/prover/crates/lib/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json similarity index 100% rename from prover/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json rename to prover/crates/lib/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json diff --git a/prover/prover_dal/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json b/prover/crates/lib/prover_dal/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json similarity index 100% rename from prover/prover_dal/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json rename to prover/crates/lib/prover_dal/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json diff --git a/prover/prover_dal/.sqlx/query-edc61e1285bf6d3837acc67af4f15aaade450980719933089824eb8c494d64a4.json b/prover/crates/lib/prover_dal/.sqlx/query-edc61e1285bf6d3837acc67af4f15aaade450980719933089824eb8c494d64a4.json similarity index 100% rename from prover/prover_dal/.sqlx/query-edc61e1285bf6d3837acc67af4f15aaade450980719933089824eb8c494d64a4.json rename to prover/crates/lib/prover_dal/.sqlx/query-edc61e1285bf6d3837acc67af4f15aaade450980719933089824eb8c494d64a4.json diff --git a/prover/prover_dal/.sqlx/query-eef1b56e87eff63fcf6ffb98791583a7526ae38ceb4bf80543cfd3fb60492fb9.json b/prover/crates/lib/prover_dal/.sqlx/query-eef1b56e87eff63fcf6ffb98791583a7526ae38ceb4bf80543cfd3fb60492fb9.json similarity index 100% rename from prover/prover_dal/.sqlx/query-eef1b56e87eff63fcf6ffb98791583a7526ae38ceb4bf80543cfd3fb60492fb9.json rename to prover/crates/lib/prover_dal/.sqlx/query-eef1b56e87eff63fcf6ffb98791583a7526ae38ceb4bf80543cfd3fb60492fb9.json diff --git a/prover/prover_dal/.sqlx/query-f99c34313e2717ec74b4f274e33dae905acac53b46eeaeb059d23e48a71df3b4.json b/prover/crates/lib/prover_dal/.sqlx/query-f99c34313e2717ec74b4f274e33dae905acac53b46eeaeb059d23e48a71df3b4.json similarity index 100% rename from prover/prover_dal/.sqlx/query-f99c34313e2717ec74b4f274e33dae905acac53b46eeaeb059d23e48a71df3b4.json rename to prover/crates/lib/prover_dal/.sqlx/query-f99c34313e2717ec74b4f274e33dae905acac53b46eeaeb059d23e48a71df3b4.json diff --git a/prover/prover_dal/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json b/prover/crates/lib/prover_dal/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json similarity index 100% rename from prover/prover_dal/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json rename to prover/crates/lib/prover_dal/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json diff --git a/prover/prover_dal/Cargo.toml b/prover/crates/lib/prover_dal/Cargo.toml similarity index 90% rename from prover/prover_dal/Cargo.toml rename to prover/crates/lib/prover_dal/Cargo.toml index bc07ce18393c..746bb69b0f3c 100644 --- a/prover/prover_dal/Cargo.toml +++ b/prover/crates/lib/prover_dal/Cargo.toml @@ -1,5 +1,6 @@ [package] -name = "prover_dal" +name = "zksync_prover_dal" +description = "ZKsync prover DAL" version.workspace = true edition.workspace = true authors.workspace = true diff --git a/prover/prover_dal/doc/FriProofCompressorDal.md b/prover/crates/lib/prover_dal/doc/FriProofCompressorDal.md similarity index 100% rename from prover/prover_dal/doc/FriProofCompressorDal.md rename to prover/crates/lib/prover_dal/doc/FriProofCompressorDal.md diff --git 
a/prover/prover_dal/doc/FriProverDal.md b/prover/crates/lib/prover_dal/doc/FriProverDal.md similarity index 100% rename from prover/prover_dal/doc/FriProverDal.md rename to prover/crates/lib/prover_dal/doc/FriProverDal.md diff --git a/prover/prover_dal/doc/FriWitnessGeneratorDal.md b/prover/crates/lib/prover_dal/doc/FriWitnessGeneratorDal.md similarity index 100% rename from prover/prover_dal/doc/FriWitnessGeneratorDal.md rename to prover/crates/lib/prover_dal/doc/FriWitnessGeneratorDal.md diff --git a/prover/prover_dal/migrations/20240131134938_initial-prover-migration.down.sql b/prover/crates/lib/prover_dal/migrations/20240131134938_initial-prover-migration.down.sql similarity index 100% rename from prover/prover_dal/migrations/20240131134938_initial-prover-migration.down.sql rename to prover/crates/lib/prover_dal/migrations/20240131134938_initial-prover-migration.down.sql diff --git a/prover/prover_dal/migrations/20240131134938_initial-prover-migration.up.sql b/prover/crates/lib/prover_dal/migrations/20240131134938_initial-prover-migration.up.sql similarity index 100% rename from prover/prover_dal/migrations/20240131134938_initial-prover-migration.up.sql rename to prover/crates/lib/prover_dal/migrations/20240131134938_initial-prover-migration.up.sql diff --git a/prover/prover_dal/migrations/20240226120310_add_support_for_eip4844.down.sql b/prover/crates/lib/prover_dal/migrations/20240226120310_add_support_for_eip4844.down.sql similarity index 100% rename from prover/prover_dal/migrations/20240226120310_add_support_for_eip4844.down.sql rename to prover/crates/lib/prover_dal/migrations/20240226120310_add_support_for_eip4844.down.sql diff --git a/prover/prover_dal/migrations/20240226120310_add_support_for_eip4844.up.sql b/prover/crates/lib/prover_dal/migrations/20240226120310_add_support_for_eip4844.up.sql similarity index 100% rename from prover/prover_dal/migrations/20240226120310_add_support_for_eip4844.up.sql rename to prover/crates/lib/prover_dal/migrations/20240226120310_add_support_for_eip4844.up.sql diff --git a/prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.down.sql b/prover/crates/lib/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.down.sql similarity index 100% rename from prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.down.sql rename to prover/crates/lib/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.down.sql diff --git a/prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.up.sql b/prover/crates/lib/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.up.sql similarity index 100% rename from prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.up.sql rename to prover/crates/lib/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.up.sql diff --git a/prover/prover_dal/migrations/20240410141719_add-protocol-versions-to-tables.down.sql b/prover/crates/lib/prover_dal/migrations/20240410141719_add-protocol-versions-to-tables.down.sql similarity index 100% rename from prover/prover_dal/migrations/20240410141719_add-protocol-versions-to-tables.down.sql rename to prover/crates/lib/prover_dal/migrations/20240410141719_add-protocol-versions-to-tables.down.sql diff --git a/prover/prover_dal/migrations/20240410141719_add-protocol-versions-to-tables.up.sql 
b/prover/crates/lib/prover_dal/migrations/20240410141719_add-protocol-versions-to-tables.up.sql similarity index 100% rename from prover/prover_dal/migrations/20240410141719_add-protocol-versions-to-tables.up.sql rename to prover/crates/lib/prover_dal/migrations/20240410141719_add-protocol-versions-to-tables.up.sql diff --git a/prover/prover_dal/migrations/20240419102606_add_changes_for_recursion_tip.down.sql b/prover/crates/lib/prover_dal/migrations/20240419102606_add_changes_for_recursion_tip.down.sql similarity index 100% rename from prover/prover_dal/migrations/20240419102606_add_changes_for_recursion_tip.down.sql rename to prover/crates/lib/prover_dal/migrations/20240419102606_add_changes_for_recursion_tip.down.sql diff --git a/prover/prover_dal/migrations/20240419102606_add_changes_for_recursion_tip.up.sql b/prover/crates/lib/prover_dal/migrations/20240419102606_add_changes_for_recursion_tip.up.sql similarity index 100% rename from prover/prover_dal/migrations/20240419102606_add_changes_for_recursion_tip.up.sql rename to prover/crates/lib/prover_dal/migrations/20240419102606_add_changes_for_recursion_tip.up.sql diff --git a/prover/prover_dal/migrations/20240524123522_add-patch-columns-for-semver.down.sql b/prover/crates/lib/prover_dal/migrations/20240524123522_add-patch-columns-for-semver.down.sql similarity index 100% rename from prover/prover_dal/migrations/20240524123522_add-patch-columns-for-semver.down.sql rename to prover/crates/lib/prover_dal/migrations/20240524123522_add-patch-columns-for-semver.down.sql diff --git a/prover/prover_dal/migrations/20240524123522_add-patch-columns-for-semver.up.sql b/prover/crates/lib/prover_dal/migrations/20240524123522_add-patch-columns-for-semver.up.sql similarity index 100% rename from prover/prover_dal/migrations/20240524123522_add-patch-columns-for-semver.up.sql rename to prover/crates/lib/prover_dal/migrations/20240524123522_add-patch-columns-for-semver.up.sql diff --git a/prover/prover_dal/migrations/20240613111518_make_number_of_final_node_jobs_mandatory.down.sql b/prover/crates/lib/prover_dal/migrations/20240613111518_make_number_of_final_node_jobs_mandatory.down.sql similarity index 100% rename from prover/prover_dal/migrations/20240613111518_make_number_of_final_node_jobs_mandatory.down.sql rename to prover/crates/lib/prover_dal/migrations/20240613111518_make_number_of_final_node_jobs_mandatory.down.sql diff --git a/prover/prover_dal/migrations/20240613111518_make_number_of_final_node_jobs_mandatory.up.sql b/prover/crates/lib/prover_dal/migrations/20240613111518_make_number_of_final_node_jobs_mandatory.up.sql similarity index 100% rename from prover/prover_dal/migrations/20240613111518_make_number_of_final_node_jobs_mandatory.up.sql rename to prover/crates/lib/prover_dal/migrations/20240613111518_make_number_of_final_node_jobs_mandatory.up.sql diff --git a/prover/crates/lib/prover_dal/migrations/20240703113903_add-vm_run_data-column.down.sql b/prover/crates/lib/prover_dal/migrations/20240703113903_add-vm_run_data-column.down.sql new file mode 100644 index 000000000000..2d62a594cc73 --- /dev/null +++ b/prover/crates/lib/prover_dal/migrations/20240703113903_add-vm_run_data-column.down.sql @@ -0,0 +1 @@ +ALTER TABLE witness_inputs_fri DROP COLUMN IF EXISTS witness_inputs_blob_url; diff --git a/prover/crates/lib/prover_dal/migrations/20240703113903_add-vm_run_data-column.up.sql b/prover/crates/lib/prover_dal/migrations/20240703113903_add-vm_run_data-column.up.sql new file mode 100644 index 000000000000..311244337ca7 --- /dev/null +++ 
b/prover/crates/lib/prover_dal/migrations/20240703113903_add-vm_run_data-column.up.sql @@ -0,0 +1 @@ +ALTER TABLE witness_inputs_fri ADD COLUMN IF NOT EXISTS witness_inputs_blob_url TEXT DEFAULT NULL; diff --git a/prover/crates/lib/prover_dal/migrations/20240722102219_remove_unused_columns_after_bwip.down.sql b/prover/crates/lib/prover_dal/migrations/20240722102219_remove_unused_columns_after_bwip.down.sql new file mode 100644 index 000000000000..aa57b5f643d8 --- /dev/null +++ b/prover/crates/lib/prover_dal/migrations/20240722102219_remove_unused_columns_after_bwip.down.sql @@ -0,0 +1,6 @@ +ALTER TABLE witness_inputs_fri ADD COLUMN IF NOT EXISTS merkle_tree_paths_blob_url TEXT; +ALTER TABLE witness_inputs_fri ADD COLUMN IF NOT EXISTS eip_4844_blobs TEXT; +ALTER TABLE witness_inputs_fri ADD COLUMN IF NOT EXISTS is_blob_cleaned BOOLEAN; +ALTER TABLE leaf_aggregation_witness_jobs_fri ADD COLUMN IF NOT EXISTS is_blob_cleaned BOOLEAN; +ALTER TABLE prover_jobs_fri ADD COLUMN IF NOT EXISTS is_blob_cleaned BOOLEAN; +ALTER TABLE prover_jobs_fri_archive ADD COLUMN IF NOT EXISTS is_blob_cleaned BOOLEAN; diff --git a/prover/crates/lib/prover_dal/migrations/20240722102219_remove_unused_columns_after_bwip.up.sql b/prover/crates/lib/prover_dal/migrations/20240722102219_remove_unused_columns_after_bwip.up.sql new file mode 100644 index 000000000000..62b32871167f --- /dev/null +++ b/prover/crates/lib/prover_dal/migrations/20240722102219_remove_unused_columns_after_bwip.up.sql @@ -0,0 +1,6 @@ +ALTER TABLE witness_inputs_fri DROP COLUMN IF EXISTS merkle_tree_paths_blob_url; +ALTER TABLE witness_inputs_fri DROP COLUMN IF EXISTS eip_4844_blobs; +ALTER TABLE witness_inputs_fri DROP COLUMN IF EXISTS is_blob_cleaned; +ALTER TABLE leaf_aggregation_witness_jobs_fri DROP COLUMN IF EXISTS is_blob_cleaned; +ALTER TABLE prover_jobs_fri DROP COLUMN IF EXISTS is_blob_cleaned; +ALTER TABLE prover_jobs_fri_archive DROP COLUMN IF EXISTS is_blob_cleaned; diff --git a/prover/prover_dal/src/fri_gpu_prover_queue_dal.rs b/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs similarity index 100% rename from prover/prover_dal/src/fri_gpu_prover_queue_dal.rs rename to prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs diff --git a/prover/prover_dal/src/fri_proof_compressor_dal.rs b/prover/crates/lib/prover_dal/src/fri_proof_compressor_dal.rs similarity index 100% rename from prover/prover_dal/src/fri_proof_compressor_dal.rs rename to prover/crates/lib/prover_dal/src/fri_proof_compressor_dal.rs diff --git a/prover/prover_dal/src/fri_protocol_versions_dal.rs b/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs similarity index 100% rename from prover/prover_dal/src/fri_protocol_versions_dal.rs rename to prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs diff --git a/prover/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs similarity index 99% rename from prover/prover_dal/src/fri_prover_dal.rs rename to prover/crates/lib/prover_dal/src/fri_prover_dal.rs index 419cb635ac53..f6efc6afa6ad 100644 --- a/prover/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -669,7 +669,6 @@ impl FriProverDal<'_, '_> { created_at: row.created_at, updated_at: row.updated_at, time_taken: row.time_taken, - is_blob_cleaned: row.is_blob_cleaned, depth: row.depth as u32, is_node_final_proof: row.is_node_final_proof, proof_blob_url: row.proof_blob_url.clone(), diff --git a/prover/prover_dal/src/fri_witness_generator_dal.rs 
b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs similarity index 89% rename from prover/prover_dal/src/fri_witness_generator_dal.rs rename to prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 8db30e5a7f11..bc9cde72fde2 100644 --- a/prover/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -1,19 +1,22 @@ #![doc = include_str!("../doc/FriWitnessGeneratorDal.md")] + use std::{collections::HashMap, str::FromStr, time::Duration}; -use sqlx::Row; +use sqlx::{types::chrono::NaiveDateTime, Row}; use zksync_basic_types::{ - basic_fri_types::{AggregationRound, Eip4844Blobs}, + basic_fri_types::AggregationRound, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, prover_dal::{ BasicWitnessGeneratorJobInfo, JobCountStatistics, LeafAggregationJobMetadata, LeafWitnessGeneratorJobInfo, NodeAggregationJobMetadata, NodeWitnessGeneratorJobInfo, - RecursionTipWitnessGeneratorJobInfo, SchedulerWitnessGeneratorJobInfo, StuckJobs, - WitnessJobStatus, + ProofGenerationTime, RecursionTipWitnessGeneratorJobInfo, SchedulerWitnessGeneratorJobInfo, + StuckJobs, WitnessJobStatus, }, L1BatchNumber, }; -use zksync_db_connection::{connection::Connection, metrics::MethodLatency}; +use zksync_db_connection::{ + connection::Connection, metrics::MethodLatency, utils::naive_time_from_pg_interval, +}; use crate::{duration_to_naive_time, pg_interval_from_duration, Prover}; @@ -40,32 +43,28 @@ impl FriWitnessGeneratorDal<'_, '_> { pub async fn save_witness_inputs( &mut self, block_number: L1BatchNumber, - object_key: &str, + witness_inputs_blob_url: &str, protocol_version: ProtocolSemanticVersion, - eip_4844_blobs: Eip4844Blobs, ) { - let blobs_raw = eip_4844_blobs.encode(); sqlx::query!( r#" INSERT INTO witness_inputs_fri ( l1_batch_number, - merkle_tree_paths_blob_url, + witness_inputs_blob_url, protocol_version, - eip_4844_blobs, status, created_at, updated_at, protocol_version_patch ) VALUES - ($1, $2, $3, $4, 'queued', NOW(), NOW(), $5) + ($1, $2, $3, 'queued', NOW(), NOW(), $4) ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::from(block_number.0), - object_key, + witness_inputs_blob_url, protocol_version.minor as i32, - blobs_raw, protocol_version.patch.0 as i32, ) .fetch_optional(self.storage.conn()) @@ -80,7 +79,7 @@ impl FriWitnessGeneratorDal<'_, '_> { last_l1_batch_to_process: u32, protocol_version: ProtocolSemanticVersion, picked_by: &str, - ) -> Option<(L1BatchNumber, Eip4844Blobs)> { + ) -> Option<L1BatchNumber> { sqlx::query!( r#" UPDATE witness_inputs_fri @@ -109,7 +108,7 @@ impl FriWitnessGeneratorDal<'_, '_> { SKIP LOCKED ) RETURNING - witness_inputs_fri.* + witness_inputs_fri.l1_batch_number "#, i64::from(last_l1_batch_to_process), protocol_version.minor as i32, @@ -119,21 +118,7 @@ impl FriWitnessGeneratorDal<'_, '_> { .fetch_optional(self.storage.conn()) .await .unwrap() - .map(|row| { - // Blobs can be `None` if we are using an `off-chain DA` - let blobs = if row.eip_4844_blobs.is_none() { - Eip4844Blobs::empty() - } else { - Eip4844Blobs::decode(&row.eip_4844_blobs.unwrap_or_else(|| { - panic!( - "missing eip 4844 blobs from the database for batch {}", - row.l1_batch_number - ) - })) - .expect("failed to decode EIP4844 blobs") - }; - (L1BatchNumber(row.l1_batch_number as u32), blobs) - }) + .map(|row| L1BatchNumber(row.l1_batch_number as u32)) } pub async fn get_basic_circuit_witness_job_attempts( @@ -556,34 +541,34 @@ impl FriWitnessGeneratorDal<'_, '_> { pub async fn
move_leaf_aggregation_jobs_from_waiting_to_queued(&mut self) -> Vec<(i64, u8)> { sqlx::query!( - r#" - UPDATE leaf_aggregation_witness_jobs_fri - SET - status = 'queued' - WHERE - (l1_batch_number, circuit_id) IN ( - SELECT - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id - FROM - prover_jobs_fri - JOIN leaf_aggregation_witness_jobs_fri lawj ON prover_jobs_fri.l1_batch_number = lawj.l1_batch_number - AND prover_jobs_fri.circuit_id = lawj.circuit_id - WHERE - lawj.status = 'waiting_for_proofs' - AND prover_jobs_fri.status = 'successful' - AND prover_jobs_fri.aggregation_round = 0 - GROUP BY - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - lawj.number_of_basic_circuits - HAVING - COUNT(*) = lawj.number_of_basic_circuits - ) - RETURNING - l1_batch_number, - circuit_id; - "#, + r#" + UPDATE leaf_aggregation_witness_jobs_fri + SET + status = 'queued' + WHERE + (l1_batch_number, circuit_id) IN ( + SELECT + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id + FROM + prover_jobs_fri + JOIN leaf_aggregation_witness_jobs_fri lawj ON prover_jobs_fri.l1_batch_number = lawj.l1_batch_number + AND prover_jobs_fri.circuit_id = lawj.circuit_id + WHERE + lawj.status = 'waiting_for_proofs' + AND prover_jobs_fri.status = 'successful' + AND prover_jobs_fri.aggregation_round = 0 + GROUP BY + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + lawj.number_of_basic_circuits + HAVING + COUNT(*) = lawj.number_of_basic_circuits + ) + RETURNING + l1_batch_number, + circuit_id; + "#, ) .fetch_all(self.storage.conn()) .await @@ -797,39 +782,39 @@ impl FriWitnessGeneratorDal<'_, '_> { pub async fn move_depth_zero_node_aggregation_jobs(&mut self) -> Vec<(i64, u8, u16)> { sqlx::query!( - r#" - UPDATE node_aggregation_witness_jobs_fri - SET - status = 'queued' - WHERE - (l1_batch_number, circuit_id, depth) IN ( - SELECT - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.depth - FROM - prover_jobs_fri - JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number - AND prover_jobs_fri.circuit_id = nawj.circuit_id - AND prover_jobs_fri.depth = nawj.depth - WHERE - nawj.status = 'waiting_for_proofs' - AND prover_jobs_fri.status = 'successful' - AND prover_jobs_fri.aggregation_round = 1 - AND prover_jobs_fri.depth = 0 - GROUP BY - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.depth, - nawj.number_of_dependent_jobs - HAVING - COUNT(*) = nawj.number_of_dependent_jobs - ) - RETURNING - l1_batch_number, - circuit_id, - depth; - "#, + r#" + UPDATE node_aggregation_witness_jobs_fri + SET + status = 'queued' + WHERE + (l1_batch_number, circuit_id, depth) IN ( + SELECT + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.depth + FROM + prover_jobs_fri + JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number + AND prover_jobs_fri.circuit_id = nawj.circuit_id + AND prover_jobs_fri.depth = nawj.depth + WHERE + nawj.status = 'waiting_for_proofs' + AND prover_jobs_fri.status = 'successful' + AND prover_jobs_fri.aggregation_round = 1 + AND prover_jobs_fri.depth = 0 + GROUP BY + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.depth, + nawj.number_of_dependent_jobs + HAVING + COUNT(*) = nawj.number_of_dependent_jobs + ) + RETURNING + l1_batch_number, + circuit_id, + depth; + "#, ) .fetch_all(self.storage.conn()) .await @@ -841,38 +826,38 @@ impl FriWitnessGeneratorDal<'_, 
'_> { pub async fn move_depth_non_zero_node_aggregation_jobs(&mut self) -> Vec<(i64, u8, u16)> { sqlx::query!( - r#" - UPDATE node_aggregation_witness_jobs_fri - SET - status = 'queued' - WHERE - (l1_batch_number, circuit_id, depth) IN ( - SELECT - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.depth - FROM - prover_jobs_fri - JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number - AND prover_jobs_fri.circuit_id = nawj.circuit_id - AND prover_jobs_fri.depth = nawj.depth - WHERE - nawj.status = 'waiting_for_proofs' - AND prover_jobs_fri.status = 'successful' - AND prover_jobs_fri.aggregation_round = 2 - GROUP BY - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.depth, - nawj.number_of_dependent_jobs - HAVING - COUNT(*) = nawj.number_of_dependent_jobs - ) - RETURNING - l1_batch_number, - circuit_id, - depth; - "#, + r#" + UPDATE node_aggregation_witness_jobs_fri + SET + status = 'queued' + WHERE + (l1_batch_number, circuit_id, depth) IN ( + SELECT + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.depth + FROM + prover_jobs_fri + JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number + AND prover_jobs_fri.circuit_id = nawj.circuit_id + AND prover_jobs_fri.depth = nawj.depth + WHERE + nawj.status = 'waiting_for_proofs' + AND prover_jobs_fri.status = 'successful' + AND prover_jobs_fri.aggregation_round = 2 + GROUP BY + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.depth, + nawj.number_of_dependent_jobs + HAVING + COUNT(*) = nawj.number_of_dependent_jobs + ) + RETURNING + l1_batch_number, + circuit_id, + depth; + "#, ) .fetch_all(self.storage.conn()) .await @@ -910,13 +895,13 @@ impl FriWitnessGeneratorDal<'_, '_> { l1_batch_number; "#, AggregationRound::NodeAggregation as i64, - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| (row.l1_batch_number as u64)) - .collect() + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.l1_batch_number as u64)) + .collect() } pub async fn move_scheduler_jobs_from_waiting_to_queued(&mut self) -> Vec<u64> { @@ -1472,7 +1457,7 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap() .map(|row| BasicWitnessGeneratorJobInfo { l1_batch_number, - merkle_tree_paths_blob_url: row.merkle_tree_paths_blob_url, + witness_inputs_blob_url: row.witness_inputs_blob_url, attempts: row.attempts as u32, status: row.status.parse::<WitnessJobStatus>().unwrap(), error: row.error, @@ -1480,15 +1465,8 @@ updated_at: row.updated_at, processing_started_at: row.processing_started_at, time_taken: row.time_taken, - is_blob_cleaned: row.is_blob_cleaned, protocol_version: row.protocol_version, picked_by: row.picked_by, - eip_4844_blobs: row - .eip_4844_blobs - .as_deref() - .map(Eip4844Blobs::decode) - .transpose() - .unwrap(), }) } @@ -1523,7 +1501,6 @@ impl FriWitnessGeneratorDal<'_, '_> { updated_at: row.updated_at, processing_started_at: row.processing_started_at, time_taken: row.time_taken, - is_blob_cleaned: row.is_blob_cleaned, protocol_version: row.protocol_version, picked_by: row.picked_by.clone(), number_of_basic_circuits: row.number_of_basic_circuits, @@ -1903,4 +1880,38 @@ impl FriWitnessGeneratorDal<'_, '_> { AggregationRound::LeafAggregation | AggregationRound::NodeAggregation => "id", } } + + pub async fn get_proof_generation_times_for_time_frame( + &mut self,
time_frame: NaiveDateTime, + ) -> sqlx::Result<Vec<ProofGenerationTime>> { + let proof_generation_times = sqlx::query!( + r#" + SELECT + comp.l1_batch_number, + (comp.updated_at - wit.created_at) AS time_taken, + wit.created_at + FROM + proof_compression_jobs_fri AS comp + JOIN witness_inputs_fri AS wit ON comp.l1_batch_number = wit.l1_batch_number + WHERE + wit.created_at > $1 + ORDER BY + time_taken DESC; + "#, + time_frame.into(), + ) + .fetch_all(self.storage.conn()) + .await? + .into_iter() + .map(|row| ProofGenerationTime { + l1_batch_number: L1BatchNumber(row.l1_batch_number as u32), + time_taken: naive_time_from_pg_interval( + row.time_taken.expect("time_taken must be present"), + ), + created_at: row.created_at, + }) + .collect(); + Ok(proof_generation_times) + } } diff --git a/prover/prover_dal/src/lib.rs b/prover/crates/lib/prover_dal/src/lib.rs similarity index 100% rename from prover/prover_dal/src/lib.rs rename to prover/crates/lib/prover_dal/src/lib.rs diff --git a/prover/prover_fri_types/Cargo.toml b/prover/crates/lib/prover_fri_types/Cargo.toml similarity index 100% rename from prover/prover_fri_types/Cargo.toml rename to prover/crates/lib/prover_fri_types/Cargo.toml diff --git a/prover/prover_fri_types/README.md b/prover/crates/lib/prover_fri_types/README.md similarity index 100% rename from prover/prover_fri_types/README.md rename to prover/crates/lib/prover_fri_types/README.md diff --git a/prover/prover_fri_types/src/keys.rs b/prover/crates/lib/prover_fri_types/src/keys.rs similarity index 100% rename from prover/prover_fri_types/src/keys.rs rename to prover/crates/lib/prover_fri_types/src/keys.rs diff --git a/prover/prover_fri_types/src/lib.rs b/prover/crates/lib/prover_fri_types/src/lib.rs similarity index 98% rename from prover/prover_fri_types/src/lib.rs rename to prover/crates/lib/prover_fri_types/src/lib.rs index 0c6557c27ffc..423be1f88fa2 100644 --- a/prover/prover_fri_types/src/lib.rs +++ b/prover/crates/lib/prover_fri_types/src/lib.rs @@ -25,11 +25,9 @@ use crate::keys::FriCircuitKey; pub mod keys; pub mod queue; -pub const EIP_4844_CIRCUIT_ID: u8 = 255; - // THESE VALUES SHOULD BE UPDATED ON ANY PROTOCOL UPGRADE OF PROVERS pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version24; -pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(1); +pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(2); pub const PROVER_PROTOCOL_SEMANTIC_VERSION: ProtocolSemanticVersion = ProtocolSemanticVersion { minor: PROVER_PROTOCOL_VERSION, patch: PROVER_PROTOCOL_PATCH, diff --git a/prover/prover_fri_types/src/queue.rs b/prover/crates/lib/prover_fri_types/src/queue.rs similarity index 100% rename from prover/prover_fri_types/src/queue.rs rename to prover/crates/lib/prover_fri_types/src/queue.rs diff --git a/prover/prover_fri_utils/Cargo.toml b/prover/crates/lib/prover_fri_utils/Cargo.toml similarity index 95% rename from prover/prover_fri_utils/Cargo.toml rename to prover/crates/lib/prover_fri_utils/Cargo.toml index c1834c596619..06b3af54cd3b 100644 --- a/prover/prover_fri_utils/Cargo.toml +++ b/prover/crates/lib/prover_fri_utils/Cargo.toml @@ -17,7 +17,7 @@ zksync_object_store.workspace = true zksync_config.workspace = true zksync_types.workspace = true zksync_prover_fri_types.workspace = true -prover_dal.workspace = true +zksync_prover_dal.workspace = true zksync_utils.workspace = true tracing.workspace = true diff --git a/prover/prover_fri_utils/src/lib.rs b/prover/crates/lib/prover_fri_utils/src/lib.rs similarity index 99% rename from
prover/prover_fri_utils/src/lib.rs rename to prover/crates/lib/prover_fri_utils/src/lib.rs index 1a1bfe8bb42d..0873d5056285 100644 --- a/prover/prover_fri_utils/src/lib.rs +++ b/prover/crates/lib/prover_fri_utils/src/lib.rs @@ -1,7 +1,7 @@ use std::time::Instant; -use prover_dal::{Connection, Prover, ProverDal}; use zksync_object_store::ObjectStore; +use zksync_prover_dal::{Connection, Prover, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ circuit_definitions::recursion_layer::{ diff --git a/prover/prover_fri_utils/src/metrics.rs b/prover/crates/lib/prover_fri_utils/src/metrics.rs similarity index 100% rename from prover/prover_fri_utils/src/metrics.rs rename to prover/crates/lib/prover_fri_utils/src/metrics.rs diff --git a/prover/crates/lib/prover_fri_utils/src/region_fetcher.rs b/prover/crates/lib/prover_fri_utils/src/region_fetcher.rs new file mode 100644 index 000000000000..c73e83d531b4 --- /dev/null +++ b/prover/crates/lib/prover_fri_utils/src/region_fetcher.rs @@ -0,0 +1,98 @@ +use core::fmt; + +use anyhow::Context; +use regex::Regex; +use reqwest::{ + header::{HeaderMap, HeaderValue}, + Method, +}; +use zksync_config::configs::fri_prover::CloudType; +use zksync_utils::http_with_retries::send_request_with_retries; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RegionFetcher { + cloud_type: CloudType, + zone_url: String, +} + +impl RegionFetcher { + pub fn new(cloud_type: CloudType, zone_url: String) -> Self { + Self { + cloud_type, + zone_url, + } + } + + pub async fn get_zone(&self) -> anyhow::Result<Zone> { + match self.cloud_type { + CloudType::GCP => GcpZoneFetcher::get_zone(&self.zone_url).await, + CloudType::Local => Ok(Zone("local".to_string())), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Zone(String); + +impl fmt::Display for Zone { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Zone { + pub fn new<T: ToString>(zone: T) -> Self { + Self(zone.to_string()) + } +} + +#[derive(Debug, Clone, Copy)] +struct GcpZoneFetcher; + +impl GcpZoneFetcher { + pub async fn get_zone(zone_url: &str) -> anyhow::Result<Zone> { + let data = Self::fetch_from_url(zone_url) + .await + .context("fetch_from_url()")?; + Self::parse_zone(&data).context("parse_zone") + } + + async fn fetch_from_url(url: &str) -> anyhow::Result<String> { + let mut headers = HeaderMap::new(); + headers.insert("Metadata-Flavor", HeaderValue::from_static("Google")); + let response = send_request_with_retries(url, 5, Method::GET, Some(headers), None).await; + response + .map_err(|err| anyhow::anyhow!("Failed fetching response from url: {url}: {err:?}"))? + .text() + .await + .context("Failed to read response as text") + } + + fn parse_zone(data: &str) -> anyhow::Result<Zone> { + // Statically provided Regex should always compile.
+ let re = Regex::new(r"^projects/\d+/zones/(\w+-\w+-\w+)$").unwrap(); + if let Some(caps) = re.captures(data) { + let zone = &caps[1]; + return Ok(Zone(zone.to_string())); + } + anyhow::bail!("failed to extract zone from: {data}"); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_zone() { + let data = "projects/295056426491/zones/us-central1-a"; + let zone = GcpZoneFetcher::parse_zone(data).unwrap(); + assert_eq!(zone, Zone::new("us-central1-a")); + } + + #[test] + fn test_parse_zone_panic() { + let data = "invalid data"; + assert!(GcpZoneFetcher::parse_zone(data).is_err()); + } +} diff --git a/prover/prover_fri_utils/src/socket_utils.rs b/prover/crates/lib/prover_fri_utils/src/socket_utils.rs similarity index 100% rename from prover/prover_fri_utils/src/socket_utils.rs rename to prover/crates/lib/prover_fri_utils/src/socket_utils.rs diff --git a/prover/prover_dal/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json b/prover/prover_dal/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json deleted file mode 100644 index 76483cd73d31..000000000000 --- a/prover/prover_dal/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id\n FROM\n prover_jobs_fri\n JOIN leaf_aggregation_witness_jobs_fri lawj ON prover_jobs_fri.l1_batch_number = lawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = lawj.circuit_id\n WHERE\n lawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n lawj.number_of_basic_circuits\n HAVING\n COUNT(*) = lawj.number_of_basic_circuits\n )\n RETURNING\n l1_batch_number,\n circuit_id;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "circuit_id", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false - ] - }, - "hash": "33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e" -} diff --git a/prover/prover_dal/.sqlx/query-5354ed86960505fe6b159ce859656f870f8bbd15666fec5cc9f398306eeb6136.json b/prover/prover_dal/.sqlx/query-5354ed86960505fe6b159ce859656f870f8bbd15666fec5cc9f398306eeb6136.json deleted file mode 100644 index 298f7bb30aa3..000000000000 --- a/prover/prover_dal/.sqlx/query-5354ed86960505fe6b159ce859656f870f8bbd15666fec5cc9f398306eeb6136.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n witness_inputs_fri (\n l1_batch_number,\n merkle_tree_paths_blob_url,\n protocol_version,\n eip_4844_blobs,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, $4, 'queued', NOW(), NOW(), $5)\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text", - "Int4", - "Bytea", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "5354ed86960505fe6b159ce859656f870f8bbd15666fec5cc9f398306eeb6136" -} diff --git a/prover/prover_dal/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json 
b/prover/prover_dal/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json deleted file mode 100644 index fac64c1ea3f9..000000000000 --- a/prover/prover_dal/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 2\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "circuit_id", - "type_info": "Int2" - }, - { - "ordinal": 2, - "name": "depth", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe" -} diff --git a/prover/prover_dal/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json b/prover/prover_dal/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json deleted file mode 100644 index 27d482317286..000000000000 --- a/prover/prover_dal/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 1\n AND prover_jobs_fri.depth = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "circuit_id", - "type_info": "Int2" - }, - { - "ordinal": 2, - "name": "depth", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320" -} diff --git a/prover/prover_dal/.sqlx/query-e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727.json b/prover/prover_dal/.sqlx/query-e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727.json deleted file mode 100644 index 
4ab8c324ff58..000000000000 --- a/prover/prover_dal/.sqlx/query-e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727.json +++ /dev/null @@ -1,103 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n witness_inputs_fri\n WHERE\n l1_batch_number <= $1\n AND status = 'queued'\n AND protocol_version = $2\n AND protocol_version_patch = $4\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n witness_inputs_fri.*\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "merkle_tree_paths_blob_url", - "type_info": "Text" - }, - { - "ordinal": 2, - "name": "attempts", - "type_info": "Int2" - }, - { - "ordinal": 3, - "name": "status", - "type_info": "Text" - }, - { - "ordinal": 4, - "name": "error", - "type_info": "Text" - }, - { - "ordinal": 5, - "name": "created_at", - "type_info": "Timestamp" - }, - { - "ordinal": 6, - "name": "updated_at", - "type_info": "Timestamp" - }, - { - "ordinal": 7, - "name": "processing_started_at", - "type_info": "Timestamp" - }, - { - "ordinal": 8, - "name": "time_taken", - "type_info": "Time" - }, - { - "ordinal": 9, - "name": "is_blob_cleaned", - "type_info": "Bool" - }, - { - "ordinal": 10, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 11, - "name": "picked_by", - "type_info": "Text" - }, - { - "ordinal": 12, - "name": "eip_4844_blobs", - "type_info": "Bytea" - }, - { - "ordinal": 13, - "name": "protocol_version_patch", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Text", - "Int4" - ] - }, - "nullable": [ - false, - true, - false, - false, - true, - false, - false, - true, - true, - true, - true, - true, - true, - false - ] - }, - "hash": "e8412d5ad1b17269da02f9a5c201ed762158a27449f61d3b1bb80069ca446727" -} diff --git a/prover/prover_fri_gateway/src/api_data_fetcher.rs b/prover/prover_fri_gateway/src/api_data_fetcher.rs deleted file mode 100644 index 6a95acc0cd0b..000000000000 --- a/prover/prover_fri_gateway/src/api_data_fetcher.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::{sync::Arc, time::Duration}; - -use async_trait::async_trait; -use prover_dal::{ConnectionPool, Prover}; -use reqwest::Client; -use serde::{de::DeserializeOwned, Serialize}; -use tokio::{sync::watch, time::sleep}; -use zksync_object_store::ObjectStore; - -use crate::metrics::METRICS; - -/// The path to the API endpoint that returns the next proof generation data. -pub(crate) const PROOF_GENERATION_DATA_PATH: &str = "/proof_generation_data"; - -/// The path to the API endpoint that submits the proof. -pub(crate) const SUBMIT_PROOF_PATH: &str = "/submit_proof"; - -pub(crate) struct PeriodicApiStruct { - pub(crate) blob_store: Arc<dyn ObjectStore>, - pub(crate) pool: ConnectionPool<Prover>, - pub(crate) api_url: String, - pub(crate) poll_duration: Duration, - pub(crate) client: Client, -} - -impl PeriodicApiStruct { - pub(crate) async fn send_http_request<Req, Resp>( - &self, - request: Req, - endpoint: &str, - ) -> Result<Resp, reqwest::Error> - where - Req: Serialize, - Resp: DeserializeOwned, - { - tracing::info!("Sending request to {}", endpoint); - - self.client - .post(endpoint) - .json(&request) - .send() - .await? - .error_for_status()?
- .json::<Resp>() - .await - } - - pub(crate) async fn run<Req>( - self, - mut stop_receiver: watch::Receiver<bool>, - ) -> anyhow::Result<()> - where - Req: Send, - Self: PeriodicApi<Req>, - { - tracing::info!( - "Starting periodic job: {} with frequency: {:?}", - Self::SERVICE_NAME, - self.poll_duration - ); - - loop { - if *stop_receiver.borrow() { - tracing::warn!("Stop signal received, shutting down {}", Self::SERVICE_NAME); - return Ok(()); - } - - if let Some((job_id, request)) = self.get_next_request().await { - match self.send_request(job_id, request).await { - Ok(response) => { - self.handle_response(job_id, response).await; - } - Err(err) => { - METRICS.http_error[&Self::SERVICE_NAME].inc(); - tracing::error!("HTTP request failed due to error: {}", err); - } - } - } - tokio::select! { - _ = stop_receiver.changed() => { - tracing::warn!("Stop signal received, shutting down {}", Self::SERVICE_NAME); - return Ok(()); - } - _ = sleep(self.poll_duration) => {} - } - } - } -} - -/// Trait for fetching data from an API periodically. -#[async_trait] -pub(crate) trait PeriodicApi<Req>: Sync + Send { - type JobId: Send + Copy; - type Response: Send; - - const SERVICE_NAME: &'static str; - - /// Returns the next request to be sent to the API and the endpoint to send it to. - async fn get_next_request(&self) -> Option<(Self::JobId, Req)>; - - /// Handles the response from the API. - async fn send_request( - &self, - job_id: Self::JobId, - request: Req, - ) -> reqwest::Result<Self::Response>; - - async fn handle_response(&self, job_id: Self::JobId, response: Self::Response); -} diff --git a/prover/prover_fri_utils/src/region_fetcher.rs b/prover/prover_fri_utils/src/region_fetcher.rs deleted file mode 100644 index cae211c26cbe..000000000000 --- a/prover/prover_fri_utils/src/region_fetcher.rs +++ /dev/null @@ -1,51 +0,0 @@ -use anyhow::Context; -use regex::Regex; -use reqwest::{ - header::{HeaderMap, HeaderValue}, - Method, -}; -use zksync_utils::http_with_retries::send_request_with_retries; - -pub async fn get_zone(zone_url: &str) -> anyhow::Result<String> { - let data = fetch_from_url(zone_url).await.context("fetch_from_url()")?; - parse_zone(&data).context("parse_zone") -} - -async fn fetch_from_url(url: &str) -> anyhow::Result<String> { - let mut headers = HeaderMap::new(); - headers.insert("Metadata-Flavor", HeaderValue::from_static("Google")); - let response = send_request_with_retries(url, 5, Method::GET, Some(headers), None).await; - response - .map_err(|err| anyhow::anyhow!("Failed fetching response from url: {url}: {err:?}"))? - .text() - .await - .context("Failed to read response as text") -} - -fn parse_zone(data: &str) -> anyhow::Result<String> { - // Statically provided Regex should always compile.
- let re = Regex::new(r"^projects/\d+/zones/(\w+-\w+-\w+)$").unwrap(); - if let Some(caps) = re.captures(data) { - let zone = &caps[1]; - return Ok(zone.to_string()); - } - anyhow::bail!("failed to extract zone from: {data}"); -} - -#[cfg(test)] -mod tests { - use crate::region_fetcher::parse_zone; - - #[test] - fn test_parse_zone() { - let data = "projects/295056426491/zones/us-central1-a"; - let zone = parse_zone(data).unwrap(); - assert_eq!(zone, "us-central1-a"); - } - - #[test] - fn test_parse_zone_panic() { - let data = "invalid data"; - assert!(parse_zone(data).is_err()); - } -} diff --git a/yarn.lock b/yarn.lock index b7e2b98c431e..173a06e631f6 100644 --- a/yarn.lock +++ b/yarn.lock @@ -7,6 +7,11 @@ resolved "https://registry.yarnpkg.com/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz#bd9154aec9983f77b3a034ecaa015c2e4201f6cf" integrity sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA== +"@adraffy/ens-normalize@1.10.1": + version "1.10.1" + resolved "https://registry.yarnpkg.com/@adraffy/ens-normalize/-/ens-normalize-1.10.1.tgz#63430d04bd8c5e74f8d7d049338f1cd9d4f02069" + integrity sha512-96Z2IP3mYmF1Xg2cDm8f1gWGf/HUVedQ3FMifV4kG/PQ4yEP51xDtRAEfhVNt5f/uzpNkZHwWQuUcu6D6K+Ekw== + "@ampproject/remapping@^2.2.0": version "2.3.0" resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.3.0.tgz#ed441b6fa600072520ce18b43d2c8cc8caecc7f4" @@ -30,6 +35,14 @@ "@babel/highlight" "^7.24.2" picocolors "^1.0.0" +"@babel/code-frame@^7.24.6": + version "7.24.6" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.24.6.tgz#ab88da19344445c3d8889af2216606d3329f3ef2" + integrity sha512-ZJhac6FkEd1yhG2AHOmfcXG4ceoLltoCVJjN5XsWN9BifBQr+cHJbWi0h68HZuSORq+3WtJ2z0hwF2NG1b5kcA== + dependencies: + "@babel/highlight" "^7.24.6" + picocolors "^1.0.0" + "@babel/compat-data@^7.23.5": version "7.24.4" resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.24.4.tgz#6f102372e9094f25d908ca0d34fc74c74606059a" @@ -66,6 +79,16 @@ "@jridgewell/trace-mapping" "^0.3.25" jsesc "^2.5.1" +"@babel/generator@^7.24.6": + version "7.24.6" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.24.6.tgz#dfac82a228582a9d30c959fe50ad28951d4737a7" + integrity sha512-S7m4eNa6YAPJRHmKsLHIDJhNAGNKoWNiWefz1MBbpnt8g9lvMDl1hir4P9bo/57bQEmuwEhnRU/AMWsD0G/Fbg== + dependencies: + "@babel/types" "^7.24.6" + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.25" + jsesc "^2.5.1" + "@babel/helper-compilation-targets@^7.23.6": version "7.23.6" resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz#4d79069b16cbcf1461289eccfbbd81501ae39991" @@ -82,6 +105,11 @@ resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz#96159db61d34a29dba454c959f5ae4a649ba9167" integrity sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA== +"@babel/helper-environment-visitor@^7.24.6": + version "7.24.6" + resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.24.6.tgz#ac7ad5517821641550f6698dd5468f8cef78620d" + integrity sha512-Y50Cg3k0LKLMjxdPjIl40SdJgMB85iXn27Vk/qbHZCFx/o5XO3PSnpi675h1KEmmDb6OFArfd5SCQEQ5Q4H88g== + "@babel/helper-function-name@^7.23.0": version "7.23.0" resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz#1f9a3cdbd5b2698a670c30d2735f9af95ed52759" @@ -90,6 +118,14 @@ 
"@babel/template" "^7.22.15" "@babel/types" "^7.23.0" +"@babel/helper-function-name@^7.24.6": + version "7.24.6" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.24.6.tgz#cebdd063386fdb95d511d84b117e51fc68fec0c8" + integrity sha512-xpeLqeeRkbxhnYimfr2PC+iA0Q7ljX/d1eZ9/inYbmfG2jpl8Lu3DyXvpOAnrS5kxkfOWJjioIMQsaMBXFI05w== + dependencies: + "@babel/template" "^7.24.6" + "@babel/types" "^7.24.6" + "@babel/helper-hoist-variables@^7.22.5": version "7.22.5" resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz#c01a007dac05c085914e8fb652b339db50d823bb" @@ -97,6 +133,13 @@ dependencies: "@babel/types" "^7.22.5" +"@babel/helper-hoist-variables@^7.24.6": + version "7.24.6" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.24.6.tgz#8a7ece8c26756826b6ffcdd0e3cf65de275af7f9" + integrity sha512-SF/EMrC3OD7dSta1bLJIlrsVxwtd0UpjRJqLno6125epQMJ/kyFmpTT4pbvPbdQHzCHg+biQ7Syo8lnDtbR+uA== + dependencies: + "@babel/types" "^7.24.6" + "@babel/helper-module-imports@^7.22.15": version "7.24.3" resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.24.3.tgz#6ac476e6d168c7c23ff3ba3cf4f7841d46ac8128" @@ -134,16 +177,33 @@ dependencies: "@babel/types" "^7.22.5" +"@babel/helper-split-export-declaration@^7.24.6": + version "7.24.6" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.6.tgz#e830068f7ba8861c53b7421c284da30ae656d7a3" + integrity sha512-CvLSkwXGWnYlF9+J3iZUvwgAxKiYzK3BWuo+mLzD/MDGOZDj7Gq8+hqaOkMxmJwmlv0iu86uH5fdADd9Hxkymw== + dependencies: + "@babel/types" "^7.24.6" + "@babel/helper-string-parser@^7.23.4": version "7.24.1" resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.24.1.tgz#f99c36d3593db9540705d0739a1f10b5e20c696e" integrity sha512-2ofRCjnnA9y+wk8b9IAREroeUP02KHp431N2mhKniy2yKIDKpbrHv9eXwm8cBeWQYcJmzv5qKCu65P47eCF7CQ== +"@babel/helper-string-parser@^7.24.6": + version "7.24.6" + resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.24.6.tgz#28583c28b15f2a3339cfafafeaad42f9a0e828df" + integrity sha512-WdJjwMEkmBicq5T9fm/cHND3+UlFa2Yj8ALLgmoSQAJZysYbBjw+azChSGPN4DSPLXOcooGRvDwZWMcF/mLO2Q== + "@babel/helper-validator-identifier@^7.22.20": version "7.22.20" resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz#c4ae002c61d2879e724581d96665583dbc1dc0e0" integrity sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A== +"@babel/helper-validator-identifier@^7.24.6": + version "7.24.6" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.6.tgz#08bb6612b11bdec78f3feed3db196da682454a5e" + integrity sha512-4yA7s865JHaqUdRbnaxarZREuPTHrjpDT+pXoAZ1yhyo6uFnIEpS8VMu16siFOHDpZNKYv5BObhsB//ycbICyw== + "@babel/helper-validator-option@^7.23.5": version "7.23.5" resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz#907a3fbd4523426285365d1206c423c4c5520307" @@ -168,11 +228,26 @@ js-tokens "^4.0.0" picocolors "^1.0.0" -"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.23.9", "@babel/parser@^7.24.0", "@babel/parser@^7.24.1", "@babel/parser@^7.24.4", "@babel/parser@^7.7.0": +"@babel/highlight@^7.24.6": + version "7.24.6" + resolved 
"https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.24.6.tgz#6d610c1ebd2c6e061cade0153bf69b0590b7b3df" + integrity sha512-2YnuOp4HAk2BsBrJJvYCbItHx0zWscI1C3zgWkz+wDyD9I7GIVrfnLyrR4Y1VR+7p+chAEcrgRQYZAGIKMV7vQ== + dependencies: + "@babel/helper-validator-identifier" "^7.24.6" + chalk "^2.4.2" + js-tokens "^4.0.0" + picocolors "^1.0.0" + +"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.23.9", "@babel/parser@^7.24.0", "@babel/parser@^7.24.1", "@babel/parser@^7.24.4": version "7.24.4" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.24.4.tgz#234487a110d89ad5a3ed4a8a566c36b9453e8c88" integrity sha512-zTvEBcghmeBma9QIGunWevvBAp4/Qu9Bdq+2k0Ot4fVMD6v3dsC9WOcRSKk7tRRyBM/53yKMJko9xOatGQAwSg== +"@babel/parser@^7.24.6", "@babel/parser@^7.7.0": + version "7.24.6" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.24.6.tgz#5e030f440c3c6c78d195528c3b688b101a365328" + integrity sha512-eNZXdfU35nJC2h24RznROuOpO94h6x8sg9ju0tT9biNtLZ2vuP8SduLqqV+/8+cebSLV9SJEAN5Z3zQbJG/M+Q== + "@babel/plugin-syntax-async-generators@^7.8.4": version "7.8.4" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d" @@ -280,7 +355,16 @@ "@babel/parser" "^7.24.0" "@babel/types" "^7.24.0" -"@babel/traverse@^7.24.1", "@babel/traverse@^7.7.0": +"@babel/template@^7.24.6": + version "7.24.6" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.24.6.tgz#048c347b2787a6072b24c723664c8d02b67a44f9" + integrity sha512-3vgazJlLwNXi9jhrR1ef8qiB65L1RK90+lEQwv4OxveHnqC3BfmnHdgySwRLzf6akhlOYenT+b7AfWq+a//AHw== + dependencies: + "@babel/code-frame" "^7.24.6" + "@babel/parser" "^7.24.6" + "@babel/types" "^7.24.6" + +"@babel/traverse@^7.24.1": version "7.24.1" resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.24.1.tgz#d65c36ac9dd17282175d1e4a3c49d5b7988f530c" integrity sha512-xuU6o9m68KeqZbQuDt2TcKSxUw/mrsvavlEqQ1leZ/B+C9tk6E4sRWy97WaXgvq5E+nU3cXMxv3WKOCanVMCmQ== @@ -296,7 +380,23 @@ debug "^4.3.1" globals "^11.1.0" -"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.24.0", "@babel/types@^7.3.3", "@babel/types@^7.7.0": +"@babel/traverse@^7.7.0": + version "7.24.6" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.24.6.tgz#0941ec50cdeaeacad0911eb67ae227a4f8424edc" + integrity sha512-OsNjaJwT9Zn8ozxcfoBc+RaHdj3gFmCmYoQLUII1o6ZrUwku0BMg80FoOTPx+Gi6XhcQxAYE4xyjPTo4SxEQqw== + dependencies: + "@babel/code-frame" "^7.24.6" + "@babel/generator" "^7.24.6" + "@babel/helper-environment-visitor" "^7.24.6" + "@babel/helper-function-name" "^7.24.6" + "@babel/helper-hoist-variables" "^7.24.6" + "@babel/helper-split-export-declaration" "^7.24.6" + "@babel/parser" "^7.24.6" + "@babel/types" "^7.24.6" + debug "^4.3.1" + globals "^11.1.0" + +"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.24.0", "@babel/types@^7.3.3": version "7.24.0" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.24.0.tgz#3b951f435a92e7333eba05b7566fd297960ea1bf" integrity sha512-+j7a5c253RfKh8iABBhywc8NSfP5LURe7Uh4qpsh6jc+aLJguvmIUBdjSdEMQv2bENrCR5MfRdjGo7vzS/ob7w== @@ -305,6 +405,15 @@ "@babel/helper-validator-identifier" "^7.22.20" to-fast-properties "^2.0.0" +"@babel/types@^7.24.6", "@babel/types@^7.7.0": + version "7.24.6" + resolved 
"https://registry.yarnpkg.com/@babel/types/-/types-7.24.6.tgz#ba4e1f59870c10dc2fa95a274ac4feec23b21912" + integrity sha512-WaMsgi6Q8zMgMth93GvWPXkhAIEobfsIkLTacoVZoK1J0CevIPGYY2Vo5YvJGqyHqXM6P4ppOYGsIRU8MM9pFQ== + dependencies: + "@babel/helper-string-parser" "^7.24.6" + "@babel/helper-validator-identifier" "^7.24.6" + to-fast-properties "^2.0.0" + "@balena/dockerignore@^1.0.2": version "1.0.2" resolved "https://registry.yarnpkg.com/@balena/dockerignore/-/dockerignore-1.0.2.tgz#9ffe4726915251e8eb69f44ef3547e0da2c03e0d" @@ -333,360 +442,6 @@ resolved "https://registry.yarnpkg.com/@colors/colors/-/colors-1.5.0.tgz#bb504579c1cae923e6576a4f5da43d25f97bdbd9" integrity sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ== -"@cspell/cspell-bundled-dicts@8.6.1": - version "8.6.1" - resolved "https://registry.yarnpkg.com/@cspell/cspell-bundled-dicts/-/cspell-bundled-dicts-8.6.1.tgz#127b11ac24885aa4b725ab4ea6c0a0a18927e513" - integrity sha512-s6Av1xIgctYLuUiazKZjQ2WRUXc9dU38BOZXwM/lb7y8grQMEuTjST1c+8MOkZkppx48/sO7GHIF3k9rEzD3fg== - dependencies: - "@cspell/dict-ada" "^4.0.2" - "@cspell/dict-aws" "^4.0.1" - "@cspell/dict-bash" "^4.1.3" - "@cspell/dict-companies" "^3.0.31" - "@cspell/dict-cpp" "^5.1.3" - "@cspell/dict-cryptocurrencies" "^5.0.0" - "@cspell/dict-csharp" "^4.0.2" - "@cspell/dict-css" "^4.0.12" - "@cspell/dict-dart" "^2.0.3" - "@cspell/dict-django" "^4.1.0" - "@cspell/dict-docker" "^1.1.7" - "@cspell/dict-dotnet" "^5.0.0" - "@cspell/dict-elixir" "^4.0.3" - "@cspell/dict-en-common-misspellings" "^2.0.0" - "@cspell/dict-en-gb" "1.1.33" - "@cspell/dict-en_us" "^4.3.17" - "@cspell/dict-filetypes" "^3.0.3" - "@cspell/dict-fonts" "^4.0.0" - "@cspell/dict-fsharp" "^1.0.1" - "@cspell/dict-fullstack" "^3.1.5" - "@cspell/dict-gaming-terms" "^1.0.5" - "@cspell/dict-git" "^3.0.0" - "@cspell/dict-golang" "^6.0.5" - "@cspell/dict-haskell" "^4.0.1" - "@cspell/dict-html" "^4.0.5" - "@cspell/dict-html-symbol-entities" "^4.0.0" - "@cspell/dict-java" "^5.0.6" - "@cspell/dict-julia" "^1.0.1" - "@cspell/dict-k8s" "^1.0.2" - "@cspell/dict-latex" "^4.0.0" - "@cspell/dict-lorem-ipsum" "^4.0.0" - "@cspell/dict-lua" "^4.0.3" - "@cspell/dict-makefile" "^1.0.0" - "@cspell/dict-node" "^4.0.3" - "@cspell/dict-npm" "^5.0.15" - "@cspell/dict-php" "^4.0.6" - "@cspell/dict-powershell" "^5.0.3" - "@cspell/dict-public-licenses" "^2.0.6" - "@cspell/dict-python" "^4.1.11" - "@cspell/dict-r" "^2.0.1" - "@cspell/dict-ruby" "^5.0.2" - "@cspell/dict-rust" "^4.0.2" - "@cspell/dict-scala" "^5.0.0" - "@cspell/dict-software-terms" "^3.3.18" - "@cspell/dict-sql" "^2.1.3" - "@cspell/dict-svelte" "^1.0.2" - "@cspell/dict-swift" "^2.0.1" - "@cspell/dict-terraform" "^1.0.0" - "@cspell/dict-typescript" "^3.1.2" - "@cspell/dict-vue" "^3.0.0" - -"@cspell/cspell-json-reporter@8.6.1": - version "8.6.1" - resolved "https://registry.yarnpkg.com/@cspell/cspell-json-reporter/-/cspell-json-reporter-8.6.1.tgz#d92e86a196d9f560cde49bd37139f7a9d8cc5ec3" - integrity sha512-75cmJgU9iQgrDnLFIUyvgybySJJi29BPw71z+8ZO9WhNofufxoSjaWepZeYV2nK0nHXM+MbdQG5Mmj/Lv6J1FA== - dependencies: - "@cspell/cspell-types" "8.6.1" - -"@cspell/cspell-pipe@8.6.1": - version "8.6.1" - resolved "https://registry.yarnpkg.com/@cspell/cspell-pipe/-/cspell-pipe-8.6.1.tgz#b4ae588a331b0751be1e7e11211bcc3b54358233" - integrity sha512-guIlGhhOLQwfqevBSgp26b+SX4I1hCH+puAksWAk93bybKkcGtGpcavAQSN9qvamox4zcHnvGutEPF+UcXuceQ== - -"@cspell/cspell-resolver@8.6.1": - version "8.6.1" - resolved 
"https://registry.yarnpkg.com/@cspell/cspell-resolver/-/cspell-resolver-8.6.1.tgz#0da1b57340cadf414b7416a065d1d166b4c521cc" - integrity sha512-ZUbYcvEhfokHG9qfUlIylUqEobG84PiDozCkE8U4h/rTSmYkf/nAD+M6yg+jQ0F2aTFGNbvpKKGFlfXFXveX7A== - dependencies: - global-directory "^4.0.1" - -"@cspell/cspell-service-bus@8.6.1": - version "8.6.1" - resolved "https://registry.yarnpkg.com/@cspell/cspell-service-bus/-/cspell-service-bus-8.6.1.tgz#ea0b1f257de6de750ef3a4075aa0fbbfbdf92bce" - integrity sha512-WpI3fSW8t00UMetfd6tS8f9+xE3+ElIUO/bQ1YKK95TMIRdEUcH+QDxcHM66pJXEm4WiaN3H/MfWk1fIhGlJ8g== - -"@cspell/cspell-types@8.6.1": - version "8.6.1" - resolved "https://registry.yarnpkg.com/@cspell/cspell-types/-/cspell-types-8.6.1.tgz#a1cfaa0f1412662733f75015992a97072b6d65ef" - integrity sha512-MXa9v6sXbbwyiNno7v7vczNph6AsMNWnpMRCcW3h/siXNQYRuMssdxqT5sQJ8Kurh3M/Wo7DlKX4n74elKL3iQ== - -"@cspell/dict-ada@^4.0.2": - version "4.0.2" - resolved "https://registry.yarnpkg.com/@cspell/dict-ada/-/dict-ada-4.0.2.tgz#8da2216660aeb831a0d9055399a364a01db5805a" - integrity sha512-0kENOWQeHjUlfyId/aCM/mKXtkEgV0Zu2RhUXCBr4hHo9F9vph+Uu8Ww2b0i5a4ZixoIkudGA+eJvyxrG1jUpA== - -"@cspell/dict-aws@^4.0.1": - version "4.0.1" - resolved "https://registry.yarnpkg.com/@cspell/dict-aws/-/dict-aws-4.0.1.tgz#a0e758531ae81792b928a3f406618296291a658a" - integrity sha512-NXO+kTPQGqaaJKa4kO92NAXoqS+i99dQzf3/L1BxxWVSBS3/k1f3uhmqIh7Crb/n22W793lOm0D9x952BFga3Q== - -"@cspell/dict-bash@^4.1.3": - version "4.1.3" - resolved "https://registry.yarnpkg.com/@cspell/dict-bash/-/dict-bash-4.1.3.tgz#25fba40825ac10083676ab2c777e471c3f71b36e" - integrity sha512-tOdI3QVJDbQSwPjUkOiQFhYcu2eedmX/PtEpVWg0aFps/r6AyjUQINtTgpqMYnYuq8O1QUIQqnpx21aovcgZCw== - -"@cspell/dict-companies@^3.0.31": - version "3.0.31" - resolved "https://registry.yarnpkg.com/@cspell/dict-companies/-/dict-companies-3.0.31.tgz#f0dacabc5308096c0f12db8a8b802ece604d6bf7" - integrity sha512-hKVpV/lcGKP4/DpEPS8P4osPvFH/YVLJaDn9cBIOH6/HSmL5LbFgJNKpMGaYRbhm2FEX56MKE3yn/MNeNYuesQ== - -"@cspell/dict-cpp@^5.1.3": - version "5.1.3" - resolved "https://registry.yarnpkg.com/@cspell/dict-cpp/-/dict-cpp-5.1.3.tgz#c0c34ccdecc3ff954877a56dbbf07a7bf53b218e" - integrity sha512-sqnriXRAInZH9W75C+APBh6dtben9filPqVbIsiRMUXGg+s02ekz0z6LbS7kXeJ5mD2qXoMLBrv13qH2eIwutQ== - -"@cspell/dict-cryptocurrencies@^5.0.0": - version "5.0.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-cryptocurrencies/-/dict-cryptocurrencies-5.0.0.tgz#19fbc7bdbec76ce64daf7d53a6d0f3cfff7d0038" - integrity sha512-Z4ARIw5+bvmShL+4ZrhDzGhnc9znaAGHOEMaB/GURdS/jdoreEDY34wdN0NtdLHDO5KO7GduZnZyqGdRoiSmYA== - -"@cspell/dict-csharp@^4.0.2": - version "4.0.2" - resolved "https://registry.yarnpkg.com/@cspell/dict-csharp/-/dict-csharp-4.0.2.tgz#e55659dbe594e744d86b1baf0f3397fe57b1e283" - integrity sha512-1JMofhLK+4p4KairF75D3A924m5ERMgd1GvzhwK2geuYgd2ZKuGW72gvXpIV7aGf52E3Uu1kDXxxGAiZ5uVG7g== - -"@cspell/dict-css@^4.0.12": - version "4.0.12" - resolved "https://registry.yarnpkg.com/@cspell/dict-css/-/dict-css-4.0.12.tgz#59abf3512ae729835c933c38f64a3d8a5f09ce3d" - integrity sha512-vGBgPM92MkHQF5/2jsWcnaahOZ+C6OE/fPvd5ScBP72oFY9tn5GLuomcyO0z8vWCr2e0nUSX1OGimPtcQAlvSw== - -"@cspell/dict-dart@^2.0.3": - version "2.0.3" - resolved "https://registry.yarnpkg.com/@cspell/dict-dart/-/dict-dart-2.0.3.tgz#75e7ffe47d5889c2c831af35acdd92ebdbd4cf12" - integrity sha512-cLkwo1KT5CJY5N5RJVHks2genFkNCl/WLfj+0fFjqNR+tk3tBI1LY7ldr9piCtSFSm4x9pO1x6IV3kRUY1lLiw== - -"@cspell/dict-data-science@^1.0.11": - version "1.0.11" - resolved 
"https://registry.yarnpkg.com/@cspell/dict-data-science/-/dict-data-science-1.0.11.tgz#4eabba75c21d27253c1114b4fbbade0ead739ffc" - integrity sha512-TaHAZRVe0Zlcc3C23StZqqbzC0NrodRwoSAc8dis+5qLeLLnOCtagYQeROQvDlcDg3X/VVEO9Whh4W/z4PAmYQ== - -"@cspell/dict-django@^4.1.0": - version "4.1.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-django/-/dict-django-4.1.0.tgz#2d4b765daf3c83e733ef3e06887ea34403a4de7a" - integrity sha512-bKJ4gPyrf+1c78Z0Oc4trEB9MuhcB+Yg+uTTWsvhY6O2ncFYbB/LbEZfqhfmmuK/XJJixXfI1laF2zicyf+l0w== - -"@cspell/dict-docker@^1.1.7": - version "1.1.7" - resolved "https://registry.yarnpkg.com/@cspell/dict-docker/-/dict-docker-1.1.7.tgz#bcf933283fbdfef19c71a642e7e8c38baf9014f2" - integrity sha512-XlXHAr822euV36GGsl2J1CkBIVg3fZ6879ZOg5dxTIssuhUOCiV2BuzKZmt6aIFmcdPmR14+9i9Xq+3zuxeX0A== - -"@cspell/dict-dotnet@^5.0.0": - version "5.0.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-dotnet/-/dict-dotnet-5.0.0.tgz#13690aafe14b240ad17a30225ac1ec29a5a6a510" - integrity sha512-EOwGd533v47aP5QYV8GlSSKkmM9Eq8P3G/eBzSpH3Nl2+IneDOYOBLEUraHuiCtnOkNsz0xtZHArYhAB2bHWAw== - -"@cspell/dict-elixir@^4.0.3": - version "4.0.3" - resolved "https://registry.yarnpkg.com/@cspell/dict-elixir/-/dict-elixir-4.0.3.tgz#57c25843e46cf3463f97da72d9ef8e37c818296f" - integrity sha512-g+uKLWvOp9IEZvrIvBPTr/oaO6619uH/wyqypqvwpmnmpjcfi8+/hqZH8YNKt15oviK8k4CkINIqNhyndG9d9Q== - -"@cspell/dict-en-common-misspellings@^2.0.0": - version "2.0.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-en-common-misspellings/-/dict-en-common-misspellings-2.0.0.tgz#708f424d75dc65237a6fcb8d253bc1e7ab641380" - integrity sha512-NOg8dlv37/YqLkCfBs5OXeJm/Wcfb/CzeOmOZJ2ZXRuxwsNuolb4TREUce0yAXRqMhawahY5TSDRJJBgKjBOdw== - -"@cspell/dict-en-gb@1.1.33": - version "1.1.33" - resolved "https://registry.yarnpkg.com/@cspell/dict-en-gb/-/dict-en-gb-1.1.33.tgz#7f1fd90fc364a5cb77111b5438fc9fcf9cc6da0e" - integrity sha512-tKSSUf9BJEV+GJQAYGw5e+ouhEe2ZXE620S7BLKe3ZmpnjlNG9JqlnaBhkIMxKnNFkLY2BP/EARzw31AZnOv4g== - -"@cspell/dict-en_us@^4.3.17": - version "4.3.17" - resolved "https://registry.yarnpkg.com/@cspell/dict-en_us/-/dict-en_us-4.3.17.tgz#a39546b9ec4cc4fb1e9607575b2682b1155dda07" - integrity sha512-CS0Tb2f2YwQZ4VZ6+WLAO5uOzb0iO/iYSRl34kX4enq6quXxLYzwdfGAwv85wSYHPdga8tGiZFP+p8GPsi2JEg== - -"@cspell/dict-filetypes@^3.0.3": - version "3.0.3" - resolved "https://registry.yarnpkg.com/@cspell/dict-filetypes/-/dict-filetypes-3.0.3.tgz#ab0723ca2f4d3d5674e9c9745efc9f144e49c905" - integrity sha512-J9UP+qwwBLfOQ8Qg9tAsKtSY/WWmjj21uj6zXTI9hRLD1eG1uUOLcfVovAmtmVqUWziPSKMr87F6SXI3xmJXgw== - -"@cspell/dict-fonts@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-fonts/-/dict-fonts-4.0.0.tgz#9bc8beb2a7b068b4fdb45cb994b36fd184316327" - integrity sha512-t9V4GeN/m517UZn63kZPUYP3OQg5f0OBLSd3Md5CU3eH1IFogSvTzHHnz4Wqqbv8NNRiBZ3HfdY/pqREZ6br3Q== - -"@cspell/dict-fsharp@^1.0.1": - version "1.0.1" - resolved "https://registry.yarnpkg.com/@cspell/dict-fsharp/-/dict-fsharp-1.0.1.tgz#d62c699550a39174f182f23c8c1330a795ab5f53" - integrity sha512-23xyPcD+j+NnqOjRHgW3IU7Li912SX9wmeefcY0QxukbAxJ/vAN4rBpjSwwYZeQPAn3fxdfdNZs03fg+UM+4yQ== - -"@cspell/dict-fullstack@^3.1.5": - version "3.1.5" - resolved "https://registry.yarnpkg.com/@cspell/dict-fullstack/-/dict-fullstack-3.1.5.tgz#35d18678161f214575cc613dd95564e05422a19c" - integrity sha512-6ppvo1dkXUZ3fbYn/wwzERxCa76RtDDl5Afzv2lijLoijGGUw5yYdLBKJnx8PJBGNLh829X352ftE7BElG4leA== - -"@cspell/dict-gaming-terms@^1.0.5": - version "1.0.5" - resolved 
"https://registry.yarnpkg.com/@cspell/dict-gaming-terms/-/dict-gaming-terms-1.0.5.tgz#d6ca40eb34a4c99847fd58a7354cd2c651065156" - integrity sha512-C3riccZDD3d9caJQQs1+MPfrUrQ+0KHdlj9iUR1QD92FgTOF6UxoBpvHUUZ9YSezslcmpFQK4xQQ5FUGS7uWfw== - -"@cspell/dict-git@^3.0.0": - version "3.0.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-git/-/dict-git-3.0.0.tgz#c275af86041a2b59a7facce37525e2af05653b95" - integrity sha512-simGS/lIiXbEaqJu9E2VPoYW1OTC2xrwPPXNXFMa2uo/50av56qOuaxDrZ5eH1LidFXwoc8HROCHYeKoNrDLSw== - -"@cspell/dict-golang@^6.0.5": - version "6.0.5" - resolved "https://registry.yarnpkg.com/@cspell/dict-golang/-/dict-golang-6.0.5.tgz#4dd2e2fda419730a21fb77ade3b90241ad4a5bcc" - integrity sha512-w4mEqGz4/wV+BBljLxduFNkMrd3rstBNDXmoX5kD4UTzIb4Sy0QybWCtg2iVT+R0KWiRRA56QKOvBsgXiddksA== - -"@cspell/dict-haskell@^4.0.1": - version "4.0.1" - resolved "https://registry.yarnpkg.com/@cspell/dict-haskell/-/dict-haskell-4.0.1.tgz#e9fca7c452411ff11926e23ffed2b50bb9b95e47" - integrity sha512-uRrl65mGrOmwT7NxspB4xKXFUenNC7IikmpRZW8Uzqbqcu7ZRCUfstuVH7T1rmjRgRkjcIjE4PC11luDou4wEQ== - -"@cspell/dict-html-symbol-entities@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-html-symbol-entities/-/dict-html-symbol-entities-4.0.0.tgz#4d86ac18a4a11fdb61dfb6f5929acd768a52564f" - integrity sha512-HGRu+48ErJjoweR5IbcixxETRewrBb0uxQBd6xFGcxbEYCX8CnQFTAmKI5xNaIt2PKaZiJH3ijodGSqbKdsxhw== - -"@cspell/dict-html@^4.0.5": - version "4.0.5" - resolved "https://registry.yarnpkg.com/@cspell/dict-html/-/dict-html-4.0.5.tgz#03a5182148d80e6c25f71339dbb2b7c5b9894ef8" - integrity sha512-p0brEnRybzSSWi8sGbuVEf7jSTDmXPx7XhQUb5bgG6b54uj+Z0Qf0V2n8b/LWwIPJNd1GygaO9l8k3HTCy1h4w== - -"@cspell/dict-java@^5.0.6": - version "5.0.6" - resolved "https://registry.yarnpkg.com/@cspell/dict-java/-/dict-java-5.0.6.tgz#2462d6fc15f79ec15eb88ecf875b6ad2a7bf7a6a" - integrity sha512-kdE4AHHHrixyZ5p6zyms1SLoYpaJarPxrz8Tveo6gddszBVVwIUZ+JkQE1bWNLK740GWzIXdkznpUfw1hP9nXw== - -"@cspell/dict-julia@^1.0.1": - version "1.0.1" - resolved "https://registry.yarnpkg.com/@cspell/dict-julia/-/dict-julia-1.0.1.tgz#900001417f1c4ea689530adfcc034c848458a0aa" - integrity sha512-4JsCLCRhhLMLiaHpmR7zHFjj1qOauzDI5ZzCNQS31TUMfsOo26jAKDfo0jljFAKgw5M2fEG7sKr8IlPpQAYrmQ== - -"@cspell/dict-k8s@^1.0.2": - version "1.0.2" - resolved "https://registry.yarnpkg.com/@cspell/dict-k8s/-/dict-k8s-1.0.2.tgz#b19e66f4ac8a4264c0f3981ac6e23e88a60f1c91" - integrity sha512-tLT7gZpNPnGa+IIFvK9SP1LrSpPpJ94a/DulzAPOb1Q2UBFwdpFd82UWhio0RNShduvKG/WiMZf/wGl98pn+VQ== - -"@cspell/dict-latex@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-latex/-/dict-latex-4.0.0.tgz#85054903db834ea867174795d162e2a8f0e9c51e" - integrity sha512-LPY4y6D5oI7D3d+5JMJHK/wxYTQa2lJMSNxps2JtuF8hbAnBQb3igoWEjEbIbRRH1XBM0X8dQqemnjQNCiAtxQ== - -"@cspell/dict-lorem-ipsum@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-lorem-ipsum/-/dict-lorem-ipsum-4.0.0.tgz#2793a5dbfde474a546b0caecc40c38fdf076306e" - integrity sha512-1l3yjfNvMzZPibW8A7mQU4kTozwVZVw0AvFEdy+NcqtbxH+TvbSkNMqROOFWrkD2PjnKG0+Ea0tHI2Pi6Gchnw== - -"@cspell/dict-lua@^4.0.3": - version "4.0.3" - resolved "https://registry.yarnpkg.com/@cspell/dict-lua/-/dict-lua-4.0.3.tgz#2d23c8f7e74b4e62000678d80e7d1ebb10b003e0" - integrity sha512-lDHKjsrrbqPaea13+G9s0rtXjMO06gPXPYRjRYawbNmo4E/e3XFfVzeci3OQDQNDmf2cPOwt9Ef5lu2lDmwfJg== - -"@cspell/dict-makefile@^1.0.0": - version "1.0.0" - resolved 
"https://registry.yarnpkg.com/@cspell/dict-makefile/-/dict-makefile-1.0.0.tgz#5afb2910873ebbc01ab8d9c38661c4c93d0e5a40" - integrity sha512-3W9tHPcSbJa6s0bcqWo6VisEDTSN5zOtDbnPabF7rbyjRpNo0uHXHRJQF8gAbFzoTzBBhgkTmrfSiuyQm7vBUQ== - -"@cspell/dict-node@^4.0.3": - version "4.0.3" - resolved "https://registry.yarnpkg.com/@cspell/dict-node/-/dict-node-4.0.3.tgz#5ae0222d72871e82978049f8e11ea627ca42fca3" - integrity sha512-sFlUNI5kOogy49KtPg8SMQYirDGIAoKBO3+cDLIwD4MLdsWy1q0upc7pzGht3mrjuyMiPRUV14Bb0rkVLrxOhg== - -"@cspell/dict-npm@^5.0.15": - version "5.0.15" - resolved "https://registry.yarnpkg.com/@cspell/dict-npm/-/dict-npm-5.0.15.tgz#c1d1646011fd0eb8ee119b481818a92223c459d1" - integrity sha512-sX0X5YWNW54F4baW7b5JJB6705OCBIZtUqjOghlJNORS5No7QY1IX1zc5FxNNu4gsaCZITAmfMi4ityXEsEThA== - -"@cspell/dict-php@^4.0.6": - version "4.0.6" - resolved "https://registry.yarnpkg.com/@cspell/dict-php/-/dict-php-4.0.6.tgz#fcdee4d850f279b2757eb55c4f69a3a221ac1f7e" - integrity sha512-ySAXisf7twoVFZqBV2o/DKiCLIDTHNqfnj0EfH9OoOUR7HL3rb6zJkm0viLUFDO2G/8SyIi6YrN/6KX+Scjjjg== - -"@cspell/dict-powershell@^5.0.3": - version "5.0.3" - resolved "https://registry.yarnpkg.com/@cspell/dict-powershell/-/dict-powershell-5.0.3.tgz#7bceb4e7db39f87479a6d2af3a033ce26796ae49" - integrity sha512-lEdzrcyau6mgzu1ie98GjOEegwVHvoaWtzQnm1ie4DyZgMr+N6D0Iyj1lzvtmt0snvsDFa5F2bsYzf3IMKcpcA== - -"@cspell/dict-public-licenses@^2.0.6": - version "2.0.6" - resolved "https://registry.yarnpkg.com/@cspell/dict-public-licenses/-/dict-public-licenses-2.0.6.tgz#e6ac8e5cb3b0ef8503d67da14435ae86a875b6cc" - integrity sha512-bHqpSpJvLCUcWxj1ov/Ki8WjmESpYwRpQlqfdchekOTc93Huhvjm/RXVN1R4fVf4Hspyem1QVkCGqAmjJMj6sw== - -"@cspell/dict-python@^4.1.11": - version "4.1.11" - resolved "https://registry.yarnpkg.com/@cspell/dict-python/-/dict-python-4.1.11.tgz#4e339def01bf468b32d459c46ecb6894970b7eb8" - integrity sha512-XG+v3PumfzUW38huSbfT15Vqt3ihNb462ulfXifpQllPok5OWynhszCLCRQjQReV+dgz784ST4ggRxW452/kVg== - dependencies: - "@cspell/dict-data-science" "^1.0.11" - -"@cspell/dict-r@^2.0.1": - version "2.0.1" - resolved "https://registry.yarnpkg.com/@cspell/dict-r/-/dict-r-2.0.1.tgz#73474fb7cce45deb9094ebf61083fbf5913f440a" - integrity sha512-KCmKaeYMLm2Ip79mlYPc8p+B2uzwBp4KMkzeLd5E6jUlCL93Y5Nvq68wV5fRLDRTf7N1LvofkVFWfDcednFOgA== - -"@cspell/dict-ruby@^5.0.2": - version "5.0.2" - resolved "https://registry.yarnpkg.com/@cspell/dict-ruby/-/dict-ruby-5.0.2.tgz#cf1a71380c633dec0857143d3270cb503b10679a" - integrity sha512-cIh8KTjpldzFzKGgrqUX4bFyav5lC52hXDKo4LbRuMVncs3zg4hcSf4HtURY+f2AfEZzN6ZKzXafQpThq3dl2g== - -"@cspell/dict-rust@^4.0.2": - version "4.0.2" - resolved "https://registry.yarnpkg.com/@cspell/dict-rust/-/dict-rust-4.0.2.tgz#e9111f0105ee6d836a1be8314f47347fd9f8fc3a" - integrity sha512-RhziKDrklzOntxAbY3AvNR58wnFGIo3YS8+dNeLY36GFuWOvXDHFStYw5Pod4f/VXbO/+1tXtywCC4zWfB2p1w== - -"@cspell/dict-scala@^5.0.0": - version "5.0.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-scala/-/dict-scala-5.0.0.tgz#b64365ad559110a36d44ccd90edf7151ea648022" - integrity sha512-ph0twaRoV+ylui022clEO1dZ35QbeEQaKTaV2sPOsdwIokABPIiK09oWwGK9qg7jRGQwVaRPEq0Vp+IG1GpqSQ== - -"@cspell/dict-software-terms@^3.3.18": - version "3.3.18" - resolved "https://registry.yarnpkg.com/@cspell/dict-software-terms/-/dict-software-terms-3.3.18.tgz#f25863c316eea195d74b170d41711e2c7402e9ca" - integrity sha512-LJZGGMGqS8KzgXJrSMs3T+6GoqHG9z8Bc+rqLzLzbtoR3FbsMasE9U8oP2PmS3q7jJLFjQkzmg508DrcuZuo2g== - -"@cspell/dict-sql@^2.1.3": - version "2.1.3" - resolved 
"https://registry.yarnpkg.com/@cspell/dict-sql/-/dict-sql-2.1.3.tgz#8d9666a82e35b310d0be4064032c0d891fbd2702" - integrity sha512-SEyTNKJrjqD6PAzZ9WpdSu6P7wgdNtGV2RV8Kpuw1x6bV+YsSptuClYG+JSdRExBTE6LwIe1bTklejUp3ZP8TQ== - -"@cspell/dict-svelte@^1.0.2": - version "1.0.2" - resolved "https://registry.yarnpkg.com/@cspell/dict-svelte/-/dict-svelte-1.0.2.tgz#0c866b08a7a6b33bbc1a3bdbe6a1b484ca15cdaa" - integrity sha512-rPJmnn/GsDs0btNvrRBciOhngKV98yZ9SHmg8qI6HLS8hZKvcXc0LMsf9LLuMK1TmS2+WQFAan6qeqg6bBxL2Q== - -"@cspell/dict-swift@^2.0.1": - version "2.0.1" - resolved "https://registry.yarnpkg.com/@cspell/dict-swift/-/dict-swift-2.0.1.tgz#06ec86e52e9630c441d3c19605657457e33d7bb6" - integrity sha512-gxrCMUOndOk7xZFmXNtkCEeroZRnS2VbeaIPiymGRHj5H+qfTAzAKxtv7jJbVA3YYvEzWcVE2oKDP4wcbhIERw== - -"@cspell/dict-terraform@^1.0.0": - version "1.0.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-terraform/-/dict-terraform-1.0.0.tgz#c7b073bb3a03683f64cc70ccaa55ce9742c46086" - integrity sha512-Ak+vy4HP/bOgzf06BAMC30+ZvL9mzv21xLM2XtfnBLTDJGdxlk/nK0U6QT8VfFLqJ0ZZSpyOxGsUebWDCTr/zQ== - -"@cspell/dict-typescript@^3.1.2": - version "3.1.2" - resolved "https://registry.yarnpkg.com/@cspell/dict-typescript/-/dict-typescript-3.1.2.tgz#14d05f54db2984feaa24ea133b583d19c04cc104" - integrity sha512-lcNOYWjLUvDZdLa0UMNd/LwfVdxhE9rKA+agZBGjL3lTA3uNvH7IUqSJM/IXhJoBpLLMVEOk8v1N9xi+vDuCdA== - -"@cspell/dict-vue@^3.0.0": - version "3.0.0" - resolved "https://registry.yarnpkg.com/@cspell/dict-vue/-/dict-vue-3.0.0.tgz#68ccb432ad93fcb0fd665352d075ae9a64ea9250" - integrity sha512-niiEMPWPV9IeRBRzZ0TBZmNnkK3olkOPYxC1Ny2AX4TGlYRajcW0WUtoSHmvvjZNfWLSg2L6ruiBeuPSbjnG6A== - -"@cspell/dynamic-import@8.6.1": - version "8.6.1" - resolved "https://registry.yarnpkg.com/@cspell/dynamic-import/-/dynamic-import-8.6.1.tgz#bc627779db48b39feb1536741534901c57e0a277" - integrity sha512-Fjvkcb5umIAcHfw/iiciYWgO2mXVuRZzQAWPSub6UFCxxcJlRz39YPXa+3O/m3lnXCeo8ChoaEN8qnuV4ogk6g== - dependencies: - import-meta-resolve "^4.0.0" - -"@cspell/strong-weak-map@8.6.1": - version "8.6.1" - resolved "https://registry.yarnpkg.com/@cspell/strong-weak-map/-/strong-weak-map-8.6.1.tgz#33c58f0d799624981399751dfb0c67328f0efdec" - integrity sha512-X6/7cy+GGVJFXsfrZapxVKn5mtehNTr7hTlg0bVj3iFoNYEPW9zq9l6WIcI4psmaU8G4DSrNsBK7pp87W3u16A== - "@cspotcode/source-map-support@^0.8.0": version "0.8.1" resolved "https://registry.yarnpkg.com/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz#00629c35a688e05a88b1cda684fb9d5e73f000a1" @@ -1012,7 +767,7 @@ "@ethersproject/bytes" "^5.7.0" "@ethersproject/properties" "^5.7.0" -"@ethersproject/bignumber@5.5.0", "@ethersproject/bignumber@~5.5.0": +"@ethersproject/bignumber@5.5.0": version "5.5.0" resolved "https://registry.yarnpkg.com/@ethersproject/bignumber/-/bignumber-5.5.0.tgz#875b143f04a216f4f8b96245bde942d42d279527" integrity sha512-6Xytlwvy6Rn3U3gKEc1vP7nR92frHkv6wtVr95LFR3jREXiCPzdWxKQ1cx4JGQBXxcguAwjA8murlYN2TSiEbg== @@ -1669,6 +1424,18 @@ resolved "https://registry.yarnpkg.com/@iarna/toml/-/toml-2.2.5.tgz#b32366c89b43c6f8cefbdefac778b9c828e3ba8c" integrity sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg== +"@isaacs/cliui@^8.0.2": + version "8.0.2" + resolved "https://registry.yarnpkg.com/@isaacs/cliui/-/cliui-8.0.2.tgz#b37667b7bc181c168782259bab42474fbf52b550" + integrity sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA== + dependencies: + string-width "^5.1.2" + string-width-cjs "npm:string-width@^4.2.0" + strip-ansi "^7.0.1" + 
strip-ansi-cjs "npm:strip-ansi@^6.0.1" + wrap-ansi "^8.1.0" + wrap-ansi-cjs "npm:wrap-ansi@^7.0.0" + "@istanbuljs/load-nyc-config@^1.0.0": version "1.1.0" resolved "https://registry.yarnpkg.com/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz#fd3db1d59ecf7cf121e80650bb86712f9b55eced" @@ -1936,6 +1703,21 @@ chalk "4.1.2" ts-morph "^19.0.0" +"@matterlabs/hardhat-zksync-deploy@^1.3.0": + version "1.3.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.3.0.tgz#5c2b723318ddf6c4d3929ec225401864ff54557a" + integrity sha512-4UHOgOwIBC4JA3W8DE9GHqbAuBhCPAjtM+Oew1aiYYGkIsPUAMYsH35+4I2FzJsYyE6mD6ATmoS/HfZweQHTlQ== + dependencies: + "@matterlabs/hardhat-zksync-solc" "^1.0.4" + chai "^4.3.6" + chalk "4.1.2" + fs-extra "^11.2.0" + glob "^10.3.10" + lodash "^4.17.21" + sinon "^17.0.1" + sinon-chai "^3.7.0" + ts-morph "^21.0.1" + "@matterlabs/hardhat-zksync-node@^0.0.1-beta.7": version "0.0.1" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-node/-/hardhat-zksync-node-0.0.1.tgz#d44bda3c0069b149e2a67c9697eb81166b169ea6" @@ -1978,7 +1760,7 @@ chalk "4.1.2" dockerode "^3.3.4" -"@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.1.4": +"@matterlabs/hardhat-zksync-solc@^1.0.4", "@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.1.4": version "1.1.4" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.1.4.tgz#04a2fad6fb6b6944c64ad969080ee65b9af3f617" integrity sha512-4/usbogh9neewR2/v8Dn2OzqVblZMUuT/iH2MyPZgPRZYQlL4SlZtMvokU9UQjZT6iSoaKCbbdWESHDHSzfUjA== @@ -1995,6 +1777,23 @@ sinon-chai "^3.7.0" undici "^5.14.0" +"@matterlabs/hardhat-zksync-solc@^1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.2.0.tgz#c1ccd1eca0381840196f220b339da08320ad9583" + integrity sha512-zM3LY6jeCVfFe2MZfiK/6k8GUcxk9BcCBiNs1Ywh4PZ4OaabYOP3HuFFmVo89BFisIRROnQ+IyT9fayKKVbFCg== + dependencies: + "@nomiclabs/hardhat-docker" "^2.0.2" + chai "^4.3.4" + chalk "^4.1.2" + debug "^4.3.5" + dockerode "^4.0.2" + fs-extra "^11.2.0" + proper-lockfile "^4.1.2" + semver "^7.6.2" + sinon "^18.0.0" + sinon-chai "^3.7.0" + undici "^6.18.2" + "@matterlabs/hardhat-zksync-verify@^0.2.0": version "0.2.2" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-0.2.2.tgz#daa34bc4404096ed0f44461ee366c1cb0e5a4f2f" @@ -2006,7 +1805,26 @@ chalk "4.1.2" dockerode "^3.3.4" -"@matterlabs/hardhat-zksync-vyper@^1.0.0": +"@matterlabs/hardhat-zksync-verify@^1.4.3": + version "1.5.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-1.5.0.tgz#a04ef9aed6fee1c4571aa7f9ba15ea452d49dd1c" + integrity sha512-dHprx+QNfGgoWYpSMHinXIjGyrC31TgiYlYxfnKmRLzfG4/ge3uirS0N2BDmo2Cl+S0SqzMrc4BJoDfnkA6tKw== + dependencies: + "@ethersproject/abi" "^5.7.0" + "@ethersproject/address" "5.7.0" + "@matterlabs/hardhat-zksync-solc" "^1.2.0" + "@nomicfoundation/hardhat-verify" "^2.0.8" + axios "^1.7.2" + cbor "^9.0.2" + chai "^4.3.4" + chalk "^4.1.2" + debug "^4.3.5" + hardhat "^2.22.5" + semver "^7.6.2" + sinon "^18.0.0" + sinon-chai "^3.7.0" + +"@matterlabs/hardhat-zksync-vyper@^1.0.8": version "1.0.8" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.0.8.tgz#d5bd496715a1e322b0bf3926b4146b4e18ab64ff" integrity sha512-XR7rbfDuBG5/LZWYfhQTP9gD+U24hSJHDuZ9U55wgIfiQTOxPoztFwEbQNiC39vjT5MjP/Nv8/IDrlEBkaVCgw== @@ -2037,6 
+1855,13 @@ tweetnacl "^1.0.3" tweetnacl-util "^0.15.1" +"@noble/curves@1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@noble/curves/-/curves-1.2.0.tgz#92d7e12e4e49b23105a2555c6984d41733d65c35" + integrity sha512-oYclrNgRaM9SsBUBVbb8M6DTV7ZHRTKugureoYEncY5c65HOmRzvSiTE3y5CYaPYJA/GVkrhXEoF0M3Ya9PMnw== + dependencies: + "@noble/hashes" "1.3.2" + "@noble/curves@1.3.0", "@noble/curves@~1.3.0": version "1.3.0" resolved "https://registry.yarnpkg.com/@noble/curves/-/curves-1.3.0.tgz#01be46da4fd195822dab821e72f71bf4aeec635e" @@ -2049,6 +1874,11 @@ resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.2.0.tgz#a3150eeb09cc7ab207ebf6d7b9ad311a9bdbed12" integrity sha512-FZfhjEDbT5GRswV3C6uvLPHMiVD6lQBmpoX5+eSiPaMTXte/IKqI5dykDxzZB/WBeK/CDuQRBWarPdi3FNY2zQ== +"@noble/hashes@1.3.2": + version "1.3.2" + resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.3.2.tgz#6f26dbc8fbc7205873ce3cee2f690eba0d421b39" + integrity sha512-MVC8EAQp7MvEcm30KWENFjgR+Mkmf+D189XJTkFIlwohU5hcBbn1ZkKq7KVTi2Hme3PMGF390DaL52beVrIihQ== + "@noble/hashes@1.3.3", "@noble/hashes@~1.3.2": version "1.3.3" resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.3.3.tgz#39908da56a4adc270147bb07968bf3b16cfe1699" @@ -2090,31 +1920,61 @@ resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-darwin-arm64/-/edr-darwin-arm64-0.3.4.tgz#e5aac2b7726f44cffe120bdd7e25e1f120471591" integrity sha512-tjavrUFLWnkn0PI+jk0D83hP2jjbmeXT1QLd5NtIleyGrJ00ZWVl+sfuA2Lle3kzfOceoI2VTR0n1pZB4KJGbQ== +"@nomicfoundation/edr-darwin-arm64@0.4.0": + version "0.4.0" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-darwin-arm64/-/edr-darwin-arm64-0.4.0.tgz#bbb43f0e01f40839b0bd38c2c443cb6910ae955f" + integrity sha512-7+rraFk9tCqvfemv9Ita5vTlSBAeO/S5aDKOgGRgYt0JEKZlrX161nDW6UfzMPxWl9GOLEDUzCEaYuNmXseUlg== + "@nomicfoundation/edr-darwin-x64@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-darwin-x64/-/edr-darwin-x64-0.3.4.tgz#cbcc0a2dcda0a7c0a900a74efc6918cff134dc23" integrity sha512-dXO0vlIoBosp8gf5/ah3dESMymjwit0Daef1E4Ew3gZ8q3LAdku0RC+YEQJi9f0I3QNfdgIrBTzibRZUoP+kVA== +"@nomicfoundation/edr-darwin-x64@0.4.0": + version "0.4.0" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-darwin-x64/-/edr-darwin-x64-0.4.0.tgz#b1ffcd9142418fd8498de34a7336b3f977907c86" + integrity sha512-+Hrc0mP9L6vhICJSfyGo/2taOToy1AIzVZawO3lU8Lf7oDQXfhQ4UkZnkWAs9SVu1eUwHUGGGE0qB8644piYgg== + "@nomicfoundation/edr-linux-arm64-gnu@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-arm64-gnu/-/edr-linux-arm64-gnu-0.3.4.tgz#12073f97d310176bb24ad7d48c25128ea8eff093" integrity sha512-dv38qmFUaqkkeeA9S0JjerqruytTfHav7gbPLpZUAEXPlJGo49R0+HQxd45I0msbm6NAXbkmKEchTLApp1ohaA== +"@nomicfoundation/edr-linux-arm64-gnu@0.4.0": + version "0.4.0" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-arm64-gnu/-/edr-linux-arm64-gnu-0.4.0.tgz#8173d16d4f6f2b3e82ba7096d2a1ea3619d8bfa7" + integrity sha512-4HUDMchNClQrVRfVTqBeSX92hM/3khCgpZkXP52qrnJPqgbdCxosOehlQYZ65wu0b/kaaZSyvACgvCLSQ5oSzQ== + "@nomicfoundation/edr-linux-arm64-musl@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-arm64-musl/-/edr-linux-arm64-musl-0.3.4.tgz#c9bc685d4d14bf21d9c3e326edd44e009e24492d" integrity sha512-CfEsb6gdCMVIlRSpWYTxoongEKHB60V6alE/y8mkfjIo7tA95wyiuvCtyo3fpiia3wQV7XoMYgIJHObHiKLKtA== +"@nomicfoundation/edr-linux-arm64-musl@0.4.0": + version "0.4.0" + resolved 
"https://registry.yarnpkg.com/@nomicfoundation/edr-linux-arm64-musl/-/edr-linux-arm64-musl-0.4.0.tgz#b1ce293a7c3e0d9f70391e1aef1a82b83b997567" + integrity sha512-D4J935ZRL8xfnP3zIFlCI9jXInJ0loDUkCTLeCEbOf2uuDumWDghKNQlF1itUS+EHaR1pFVBbuwqq8hVK0dASg== + "@nomicfoundation/edr-linux-x64-gnu@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-x64-gnu/-/edr-linux-x64-gnu-0.3.4.tgz#37486cbe317b8caf7961e500fc0150c45c895a56" integrity sha512-V0CpJA2lYWulgTR+zP11ftBAEwkpMAAki/AuMu3vd7HoPfjwIDzWDQR5KFU17qFmqAVz0ICRxsxDlvvBZ/PUxA== +"@nomicfoundation/edr-linux-x64-gnu@0.4.0": + version "0.4.0" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-x64-gnu/-/edr-linux-x64-gnu-0.4.0.tgz#4c12c4e4bfd3d837f5663ad7cbf7cb6d5634ef83" + integrity sha512-6x7HPy+uN5Cb9N77e2XMmT6+QSJ+7mRbHnhkGJ8jm4cZvWuj2Io7npOaeHQ3YHK+TiQpTnlbkjoOIpEwpY3XZA== + "@nomicfoundation/edr-linux-x64-musl@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-x64-musl/-/edr-linux-x64-musl-0.3.4.tgz#399278807100a1833f6c8a39c17d5beaaf7a9223" integrity sha512-0sgTrwZajarukerU/QSb+oRdlQLnJdd7of8OlXq2wtpeTNTqemgCOwY2l2qImbWboMpVrYgcmGbINXNVPCmuJw== +"@nomicfoundation/edr-linux-x64-musl@0.4.0": + version "0.4.0" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-x64-musl/-/edr-linux-x64-musl-0.4.0.tgz#8842004aa1a47c504f10863687da28b65dca7baa" + integrity sha512-3HFIJSXgyubOiaN4MWGXx2xhTnhwlJk0PiSYNf9+L/fjBtcRkb2nM910ZJHTvqCb6OT98cUnaKuAYdXIW2amgw== + "@nomicfoundation/edr-win32-arm64-msvc@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-win32-arm64-msvc/-/edr-win32-arm64-msvc-0.3.4.tgz#879028e2708538fd54efc349c1a4de107a15abb4" @@ -2130,6 +1990,11 @@ resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-win32-x64-msvc/-/edr-win32-x64-msvc-0.3.4.tgz#abfc447eb6bd1a9be868bec5c9d14546398ab609" integrity sha512-fResvsL/fSucep1K5W6iOs8lqqKKovHLsAmigMzAYVovqkyZKgCGVS/D8IVxA0nxuGCOlNxFnVmwWtph3pbKWA== +"@nomicfoundation/edr-win32-x64-msvc@0.4.0": + version "0.4.0" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-win32-x64-msvc/-/edr-win32-x64-msvc-0.4.0.tgz#29d8bbb2edf9912a95f5453855cf17cdcb269957" + integrity sha512-CP4GsllEfXEz+lidcGYxKe5rDJ60TM5/blB5z/04ELVvw6/CK9eLcYeku7HV0jvV7VE6dADYKSdQyUkvd0El+A== + "@nomicfoundation/edr@^0.3.1": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr/-/edr-0.3.4.tgz#e8eaf41963460139c47b0785f1a6a2a1c1b24ae0" @@ -2145,6 +2010,19 @@ "@nomicfoundation/edr-win32-ia32-msvc" "0.3.4" "@nomicfoundation/edr-win32-x64-msvc" "0.3.4" +"@nomicfoundation/edr@^0.4.0": + version "0.4.0" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr/-/edr-0.4.0.tgz#4895ecb6ef321136db837458949c37cce4a29459" + integrity sha512-T96DMSogO8TCdbKKctvxfsDljbhFOUKWc9fHJhSeUh71EEho2qR4951LKQF7t7UWEzguVYh/idQr5L/E3QeaMw== + dependencies: + "@nomicfoundation/edr-darwin-arm64" "0.4.0" + "@nomicfoundation/edr-darwin-x64" "0.4.0" + "@nomicfoundation/edr-linux-arm64-gnu" "0.4.0" + "@nomicfoundation/edr-linux-arm64-musl" "0.4.0" + "@nomicfoundation/edr-linux-x64-gnu" "0.4.0" + "@nomicfoundation/edr-linux-x64-musl" "0.4.0" + "@nomicfoundation/edr-win32-x64-msvc" "0.4.0" + "@nomicfoundation/ethereumjs-common@4.0.4": version "4.0.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/ethereumjs-common/-/ethereumjs-common-4.0.4.tgz#9901f513af2d4802da87c66d6f255b510bef5acb" @@ -2209,6 +2087,21 @@ table "^6.8.0" undici "^5.14.0" +"@nomicfoundation/hardhat-verify@^2.0.8": 
+ version "2.0.8" + resolved "https://registry.yarnpkg.com/@nomicfoundation/hardhat-verify/-/hardhat-verify-2.0.8.tgz#6a77dc03de990a1a3aa8e6dc073c393263dbf258" + integrity sha512-x/OYya7A2Kcz+3W/J78dyDHxr0ezU23DKTrRKfy5wDPCnePqnr79vm8EXqX3gYps6IjPBYyGPZ9K6E5BnrWx5Q== + dependencies: + "@ethersproject/abi" "^5.1.2" + "@ethersproject/address" "^5.0.2" + cbor "^8.1.0" + chalk "^2.4.2" + debug "^4.1.1" + lodash.clonedeep "^4.5.0" + semver "^6.3.0" + table "^6.8.0" + undici "^5.14.0" + "@nomicfoundation/solidity-analyzer-darwin-arm64@0.1.1": version "0.1.1" resolved "https://registry.yarnpkg.com/@nomicfoundation/solidity-analyzer-darwin-arm64/-/solidity-analyzer-darwin-arm64-0.1.1.tgz#4c858096b1c17fe58a474fe81b46815f93645c15" @@ -2275,7 +2168,7 @@ "@nomicfoundation/solidity-analyzer-win32-ia32-msvc" "0.1.1" "@nomicfoundation/solidity-analyzer-win32-x64-msvc" "0.1.1" -"@nomiclabs/hardhat-docker@^2.0.0": +"@nomiclabs/hardhat-docker@^2.0.0", "@nomiclabs/hardhat-docker@^2.0.2": version "2.0.2" resolved "https://registry.yarnpkg.com/@nomiclabs/hardhat-docker/-/hardhat-docker-2.0.2.tgz#ae964be17951275a55859ff7358e9e7c77448846" integrity sha512-XgGEpRT3wlA1VslyB57zyAHV+oll8KnV1TjwnxxC1tpAL04/lbdwpdO5KxInVN8irMSepqFpsiSkqlcnvbE7Ng== @@ -2305,10 +2198,10 @@ table "^6.8.0" undici "^5.14.0" -"@nomiclabs/hardhat-vyper@^3.0.5": - version "3.0.5" - resolved "https://registry.yarnpkg.com/@nomiclabs/hardhat-vyper/-/hardhat-vyper-3.0.5.tgz#44594b8a27e9c627534013fdebe6a485275f846e" - integrity sha512-i/Q771sr4vnSTaNTMGe3kX4Nl2on7hiXHHcz1MrW0+MKAJfi3A4sEloXX3aim6TylCPFq0M1/esDX+Y0WPmfbQ== +"@nomiclabs/hardhat-vyper@^3.0.6": + version "3.0.6" + resolved "https://registry.yarnpkg.com/@nomiclabs/hardhat-vyper/-/hardhat-vyper-3.0.6.tgz#ffad8028e4e002a92029cc4ba5c098b796ad74fb" + integrity sha512-htemsSSF8JYIemL/HI7fTPZfby0uo+5Ue4K9sG42jMdK+wT4wiOxnO4ZFGQAEJTICiDtu2MCfMq0qmCCmrT7ww== dependencies: debug "^4.1.1" fs-extra "^7.0.1" @@ -2336,6 +2229,11 @@ resolved "https://registry.yarnpkg.com/@openzeppelin/contracts/-/contracts-4.9.6.tgz#2a880a24eb19b4f8b25adc2a5095f2aa27f39677" integrity sha512-xSmezSupL+y9VkHZJGDoCBpmnB2ogM13ccaYDWqJTfS3dbuHkgjuwDFUmaFauBCboQMGB/S5UqUl2y54X99BmA== +"@pkgjs/parseargs@^0.11.0": + version "0.11.0" + resolved "https://registry.yarnpkg.com/@pkgjs/parseargs/-/parseargs-0.11.0.tgz#a77ea742fab25775145434eb1d2328cf5013ac33" + integrity sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg== + "@pkgr/core@^0.1.0": version "0.1.1" resolved "https://registry.yarnpkg.com/@pkgr/core/-/core-0.1.1.tgz#1ec17e2edbec25c8306d424ecfbf13c7de1aaa31" @@ -2550,7 +2448,7 @@ dependencies: type-detect "4.0.8" -"@sinonjs/commons@^3.0.0": +"@sinonjs/commons@^3.0.0", "@sinonjs/commons@^3.0.1": version "3.0.1" resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-3.0.1.tgz#1029357e44ca901a615585f6d27738dbc89084cd" integrity sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ== @@ -2633,6 +2531,16 @@ mkdirp "^2.1.6" path-browserify "^1.0.1" +"@ts-morph/common@~0.22.0": + version "0.22.0" + resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.22.0.tgz#8951d451622a26472fbc3a227d6c3a90e687a683" + integrity sha512-HqNBuV/oIlMKdkLshXd1zKBqNQCsuPEsgQOkfFQ/eUKjRlwndXW1AjN9LVkBEIukm00gGXSRmfkl0Wv5VXLnlw== + dependencies: + fast-glob "^3.3.2" + minimatch "^9.0.3" + mkdirp "^3.0.1" + path-browserify "^1.0.1" + "@tsconfig/node10@^1.0.7": version "1.0.11" resolved 
"https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.11.tgz#6ee46400685f130e278128c7b38b7e031ff5b2f2" @@ -2888,6 +2796,11 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-11.11.6.tgz#df929d1bb2eee5afdda598a41930fe50b43eaa6a" integrity sha512-Exw4yUWMBXM3X+8oqzJNRqZSwUAaS4+7NdvHqQuFi/d+synz++xmX3QIf+BFqneW8N31R8Ky+sikfZUXq07ggQ== +"@types/node@18.15.13": + version "18.15.13" + resolved "https://registry.yarnpkg.com/@types/node/-/node-18.15.13.tgz#f64277c341150c979e42b00e4ac289290c9df469" + integrity sha512-N+0kuo9KgrUQ1Sn/ifDXsvg0TTleP7rIy4zOBGECxAljqvqfqpTfzx0Q1NUedOixRMBfe2Whhb056a42cWs26Q== + "@types/node@^10.0.3": version "10.17.60" resolved "https://registry.yarnpkg.com/@types/node/-/node-10.17.60.tgz#35f3d6213daed95da7f0f73e75bcc6980e90597b" @@ -3206,6 +3119,11 @@ aes-js@3.0.0: resolved "https://registry.yarnpkg.com/aes-js/-/aes-js-3.0.0.tgz#e21df10ad6c2053295bcbb8dab40b09dbea87e4d" integrity sha512-H7wUZRn8WpTq9jocdxQ2c8x2sKo9ZVmzfRE13GiNJXfp7NcKYEdvl3vspKjXox6RIG2VtaRe4JFvxG4rqp2Zuw== +aes-js@4.0.0-beta.5: + version "4.0.0-beta.5" + resolved "https://registry.yarnpkg.com/aes-js/-/aes-js-4.0.0-beta.5.tgz#8d2452c52adedebc3a3e28465d858c11ca315873" + integrity sha512-G965FqalsNyrPqgEGON7nIx1e/OVENSgiEIzyC63haUMuvNnwIgIjMs52hlTCKhkBny7A2ORNlfY9Zu+jmGk1Q== + agent-base@6: version "6.0.2" resolved "https://registry.yarnpkg.com/agent-base/-/agent-base-6.0.2.tgz#49fff58577cfee3f37176feab4c22e00f86d7f77" @@ -3314,6 +3232,11 @@ ansi-styles@^5.0.0: resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b" integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA== +ansi-styles@^6.1.0: + version "6.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-6.2.1.tgz#0e62320cf99c21afff3b3012192546aacbfb05c5" + integrity sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug== + antlr4@^4.11.0: version "4.13.1" resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.1.tgz#1e0a1830a08faeb86217cb2e6c34716004e4253d" @@ -3393,11 +3316,6 @@ array-includes@^3.1.7: get-intrinsic "^1.2.4" is-string "^1.0.7" -array-timsort@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/array-timsort/-/array-timsort-1.0.3.tgz#3c9e4199e54fb2b9c3fe5976396a21614ef0d926" - integrity sha512-/+3GRL7dDAGEfM6TseQk/U+mi18TU2Ms9I3UlLdUMhz2hbvGNTKdj9xniwXfUqgYhHxRx0+8UnKkvlNwVU+cWQ== - array-union@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" @@ -3543,6 +3461,15 @@ axios@^1.4.0, axios@^1.5.1: form-data "^4.0.0" proxy-from-env "^1.1.0" +axios@^1.7.2: + version "1.7.2" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.2.tgz#b625db8a7051fbea61c35a3cbb3a1daa7b9c7621" + integrity sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw== + dependencies: + follow-redirects "^1.15.6" + form-data "^4.0.0" + proxy-from-env "^1.1.0" + babel-eslint@^10.1.0: version "10.1.0" resolved "https://registry.yarnpkg.com/babel-eslint/-/babel-eslint-10.1.0.tgz#6968e568a910b78fb3779cdd8b6ac2f479943232" @@ -3896,7 +3823,7 @@ call-bind@^1.0.2, call-bind@^1.0.5, call-bind@^1.0.6, call-bind@^1.0.7: get-intrinsic "^1.2.4" set-function-length "^1.2.1" -callsites@^3.0.0, callsites@^3.1.0: +callsites@^3.0.0: version "3.1.0" resolved 
"https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== @@ -3933,6 +3860,13 @@ cbor@^8.1.0: dependencies: nofilter "^3.1.0" +cbor@^9.0.2: + version "9.0.2" + resolved "https://registry.yarnpkg.com/cbor/-/cbor-9.0.2.tgz#536b4f2d544411e70ec2b19a2453f10f83cd9fdb" + integrity sha512-JPypkxsB10s9QOWwa6zwPzqE1Md3vqpPc+cai4sAecuCsRyAtAl/pMyhPlMbT/xtPnm2dznJZYRLui57qiRhaQ== + dependencies: + nofilter "^3.1.0" + chai-as-promised@^7.1.1: version "7.1.1" resolved "https://registry.yarnpkg.com/chai-as-promised/-/chai-as-promised-7.1.1.tgz#08645d825deb8696ee61725dbf590c012eb00ca0" @@ -3953,13 +3887,6 @@ chai@^4.3.10, chai@^4.3.4, chai@^4.3.6: pathval "^1.1.1" type-detect "^4.0.8" -chalk-template@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/chalk-template/-/chalk-template-1.1.0.tgz#ffc55db6dd745e9394b85327c8ac8466edb7a7b1" - integrity sha512-T2VJbcDuZQ0Tb2EWwSotMPJjgpy1/tGee1BTpUNsGZ/qgNjV2t7Mvu+d4600U564nbLesN1x2dPL+xii174Ekg== - dependencies: - chalk "^5.2.0" - chalk@4.1.2, chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" @@ -3977,11 +3904,6 @@ chalk@^2.4.1, chalk@^2.4.2: escape-string-regexp "^1.0.5" supports-color "^5.3.0" -chalk@^5.2.0, chalk@^5.3.0: - version "5.3.0" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.3.0.tgz#67c20a7ebef70e7f3970a01f90fa210cb6860385" - integrity sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w== - char-regex@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/char-regex/-/char-regex-1.0.2.tgz#d744358226217f981ed58f479b1d6bcc29545dcf" @@ -4067,14 +3989,6 @@ clean-stack@^2.0.0: resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-2.2.0.tgz#ee8472dbb129e727b31e8a10a427dee9dfe4008b" integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A== -clear-module@^4.1.2: - version "4.1.2" - resolved "https://registry.yarnpkg.com/clear-module/-/clear-module-4.1.2.tgz#5a58a5c9f8dccf363545ad7284cad3c887352a80" - integrity sha512-LWAxzHqdHsAZlPlEyJ2Poz6AIs384mPeqLVCru2p0BrP9G/kVGuhNyZYClLO6cXlnuJjzC8xtsJIuMjKqLXoAw== - dependencies: - parent-module "^2.0.0" - resolve-from "^5.0.0" - cli-boxes@^2.2.1: version "2.2.1" resolved "https://registry.yarnpkg.com/cli-boxes/-/cli-boxes-2.2.1.tgz#ddd5035d25094fce220e9cab40a45840a440318f" @@ -4231,11 +4145,6 @@ commander@^10.0.0: resolved "https://registry.yarnpkg.com/commander/-/commander-10.0.1.tgz#881ee46b4f77d1c1dccc5823433aa39b022cbe06" integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug== -commander@^12.0.0: - version "12.0.0" - resolved "https://registry.yarnpkg.com/commander/-/commander-12.0.0.tgz#b929db6df8546080adfd004ab215ed48cf6f2592" - integrity sha512-MwVNWlYjDTtOjX5PiD7o5pK0UrFU/OYgcJfjjK4RaHZETNtjJqrZa9Y9ds88+A+f+d5lv+561eZ+yCKoS3gbAA== - commander@^2.19.0: version "2.20.3" resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" @@ -4268,17 +4177,6 @@ commander@~9.4.1: resolved "https://registry.yarnpkg.com/commander/-/commander-9.4.1.tgz#d1dd8f2ce6faf93147295c0df13c7c21141cfbdd" integrity sha512-5EEkTNyHNGFPD2H+c/dXXfQZYa/scCKasxWcXJaWnNJ99pnQN9Vnmqow+p+PlFPE63Q6mThaZws1T+HxfpgtPw== -comment-json@^4.2.3: - version 
"4.2.3" - resolved "https://registry.yarnpkg.com/comment-json/-/comment-json-4.2.3.tgz#50b487ebbf43abe44431f575ebda07d30d015365" - integrity sha512-SsxdiOf064DWoZLH799Ata6u7iV658A11PlWtZATDlXPpKGJnbJZ5Z24ybixAi+LUUqJ/GKowAejtC5GFUG7Tw== - dependencies: - array-timsort "^1.0.3" - core-util-is "^1.0.3" - esprima "^4.0.1" - has-own-prop "^2.0.0" - repeat-string "^1.6.1" - concat-map@0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" @@ -4294,17 +4192,6 @@ concat-stream@^1.6.0, concat-stream@^1.6.2, concat-stream@~1.6.2: readable-stream "^2.2.2" typedarray "^0.0.6" -configstore@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/configstore/-/configstore-6.0.0.tgz#49eca2ebc80983f77e09394a1a56e0aca8235566" - integrity sha512-cD31W1v3GqUlQvbBCGcXmd2Nj9SvLDOP1oQ0YFuLETufzSPaKp11rYBsSOm7rCsW3OnIRAFM3OxRhceaXNYHkA== - dependencies: - dot-prop "^6.0.1" - graceful-fs "^4.2.6" - unique-string "^3.0.0" - write-file-atomic "^3.0.3" - xdg-basedir "^5.0.1" - convert-source-map@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a" @@ -4330,7 +4217,7 @@ core-util-is@1.0.2: resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" integrity sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ== -core-util-is@^1.0.3, core-util-is@~1.0.0: +core-util-is@~1.0.0: version "1.0.3" resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.3.tgz#a6042d3634c2b27e9328f837b965fac83808db85" integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ== @@ -4410,7 +4297,7 @@ cross-spawn@^6.0.5: shebang-command "^1.2.0" which "^1.2.9" -cross-spawn@^7.0.2, cross-spawn@^7.0.3: +cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3: version "7.0.3" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== @@ -4429,123 +4316,6 @@ crypto-js@^4.2.0: resolved "https://registry.yarnpkg.com/crypto-js/-/crypto-js-4.2.0.tgz#4d931639ecdfd12ff80e8186dba6af2c2e856631" integrity sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q== -crypto-random-string@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-4.0.0.tgz#5a3cc53d7dd86183df5da0312816ceeeb5bb1fc2" - integrity sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA== - dependencies: - type-fest "^1.0.1" - -cspell-config-lib@8.6.1: - version "8.6.1" - resolved "https://registry.yarnpkg.com/cspell-config-lib/-/cspell-config-lib-8.6.1.tgz#951052d985756e684c540f92f8c6c4df25869519" - integrity sha512-I6LatgXJb8mxKFzIywO81TlUD/qWnUDrhB6yTUPdP90bwZcXMmGoCsZxhd2Rvl9fz5fWne0T839I1coShfm86g== - dependencies: - "@cspell/cspell-types" "8.6.1" - comment-json "^4.2.3" - yaml "^2.4.1" - -cspell-dictionary@8.6.1: - version "8.6.1" - resolved "https://registry.yarnpkg.com/cspell-dictionary/-/cspell-dictionary-8.6.1.tgz#c39a86ddd2ec5d31783414ff963db65c838177bc" - integrity sha512-0SfKPi1QoWbGpZ/rWMR7Jn0+GaQT9PAMLWjVOu66PUNUXI5f4oCTHpnZE1Xts+5VX8shZC3TAMHEgtgKuQn4RQ== - dependencies: - "@cspell/cspell-pipe" "8.6.1" - "@cspell/cspell-types" 
"8.6.1" - cspell-trie-lib "8.6.1" - fast-equals "^5.0.1" - gensequence "^7.0.0" - -cspell-gitignore@8.6.1: - version "8.6.1" - resolved "https://registry.yarnpkg.com/cspell-gitignore/-/cspell-gitignore-8.6.1.tgz#abb25f15ef25377cf0f071dba958635bd9ded4e8" - integrity sha512-3gtt351sSDfN826aMXTqGHVLz2lz9ZHr8uemImUc24Q+676sXkJM9lXzqP8PUqwGhLyt5qSf+9pt0ieNwQy/cA== - dependencies: - cspell-glob "8.6.1" - find-up-simple "^1.0.0" - -cspell-glob@8.6.1: - version "8.6.1" - resolved "https://registry.yarnpkg.com/cspell-glob/-/cspell-glob-8.6.1.tgz#6d80f703e9df15d0f63d3b36dcd5bc07ca908325" - integrity sha512-QjtngIR0XsUQLmHHDO86hps/JR5sRxSBwCvcsNCEmSdpdofLFc8cuxi3o33JWge7UAPBCQOLGfpA7/Wx31srmw== - dependencies: - micromatch "^4.0.5" - -cspell-grammar@8.6.1: - version "8.6.1" - resolved "https://registry.yarnpkg.com/cspell-grammar/-/cspell-grammar-8.6.1.tgz#d623475a0752b662769fc2a4de4745c25f7c0cbd" - integrity sha512-MaG0e/F0b2FnIRULCZ61JxEiJgTP/6rsbUoR5nG9X+WmJYItYmxC1F/FPPrVeTu+jJr/8O4pdnslE20pimHaCw== - dependencies: - "@cspell/cspell-pipe" "8.6.1" - "@cspell/cspell-types" "8.6.1" - -cspell-io@8.6.1: - version "8.6.1" - resolved "https://registry.yarnpkg.com/cspell-io/-/cspell-io-8.6.1.tgz#3b0fc769a609df8b027d3f189272f59ec3c0f642" - integrity sha512-ofxBB8QtUPvh/bOwKLYsqU1hwQCet8E98jkn/5f4jtG+/x5Zd80I0Ez+tlbjiBmrrQfOKh+i8ipfzHD8JtoreQ== - dependencies: - "@cspell/cspell-service-bus" "8.6.1" - -cspell-lib@8.6.1: - version "8.6.1" - resolved "https://registry.yarnpkg.com/cspell-lib/-/cspell-lib-8.6.1.tgz#691b1fc80c128eea3c4a24b59d20b1de95a912e2" - integrity sha512-kGeDUypRtThFT81IdUK7yU8eUwO5MYWj8pGQ0N8WFsqbCahJrUdcocceVSpnCX48W3CXu12DkqYG9kv5Umn7Xw== - dependencies: - "@cspell/cspell-bundled-dicts" "8.6.1" - "@cspell/cspell-pipe" "8.6.1" - "@cspell/cspell-resolver" "8.6.1" - "@cspell/cspell-types" "8.6.1" - "@cspell/dynamic-import" "8.6.1" - "@cspell/strong-weak-map" "8.6.1" - clear-module "^4.1.2" - comment-json "^4.2.3" - configstore "^6.0.0" - cspell-config-lib "8.6.1" - cspell-dictionary "8.6.1" - cspell-glob "8.6.1" - cspell-grammar "8.6.1" - cspell-io "8.6.1" - cspell-trie-lib "8.6.1" - fast-equals "^5.0.1" - gensequence "^7.0.0" - import-fresh "^3.3.0" - resolve-from "^5.0.0" - vscode-languageserver-textdocument "^1.0.11" - vscode-uri "^3.0.8" - -cspell-trie-lib@8.6.1: - version "8.6.1" - resolved "https://registry.yarnpkg.com/cspell-trie-lib/-/cspell-trie-lib-8.6.1.tgz#7ff0e5992602808aa50d292bccd2b2e9484f5c28" - integrity sha512-iuJuAyWoqTH/TpFAR/ISJGQQoW3oiw54GyvXIucPoCJt/jgQONDuzqPW+skiLvcgcTbXCN9dutZTb2gImIkmpw== - dependencies: - "@cspell/cspell-pipe" "8.6.1" - "@cspell/cspell-types" "8.6.1" - gensequence "^7.0.0" - -cspell@^8.3.2: - version "8.6.1" - resolved "https://registry.yarnpkg.com/cspell/-/cspell-8.6.1.tgz#b3fd935c2bcbec64b47377a4de5b569ab50daa47" - integrity sha512-/Qle15v4IQe7tViSWX0+RCZJ2HJ4HUCZV9Z4uOVasNUz+DWCrxysNR+pfCRYuLX/6lQdqCM9QCR9GZc7a2KIVA== - dependencies: - "@cspell/cspell-json-reporter" "8.6.1" - "@cspell/cspell-pipe" "8.6.1" - "@cspell/cspell-types" "8.6.1" - "@cspell/dynamic-import" "8.6.1" - chalk "^5.3.0" - chalk-template "^1.1.0" - commander "^12.0.0" - cspell-gitignore "8.6.1" - cspell-glob "8.6.1" - cspell-io "8.6.1" - cspell-lib "8.6.1" - fast-glob "^3.3.2" - fast-json-stable-stringify "^2.1.0" - file-entry-cache "^8.0.0" - get-stdin "^9.0.0" - semver "^7.6.0" - strip-ansi "^7.1.0" - vscode-uri "^3.0.8" - dashdash@^1.12.0: version "1.14.1" resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" @@ 
-4613,6 +4383,13 @@ debug@^3.1.0, debug@^3.2.6, debug@^3.2.7: dependencies: ms "^2.1.1" +debug@^4.3.5: + version "4.3.5" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.5.tgz#e83444eceb9fedd4a1da56d671ae2446a01a6e1e" + integrity sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg== + dependencies: + ms "2.1.2" + decamelize@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-4.0.0.tgz#aa472d7bf660eb15f3494efd531cab7f2a709837" @@ -4706,7 +4483,7 @@ diff@^4.0.1: resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== -diff@^5.1.0: +diff@^5.1.0, diff@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/diff/-/diff-5.2.0.tgz#26ded047cd1179b78b9537d5ef725503ce1ae531" integrity sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A== @@ -4801,13 +4578,6 @@ doctrine@^3.0.0: dependencies: esutils "^2.0.2" -dot-prop@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-6.0.1.tgz#fc26b3cf142b9e59b74dbd39ed66ce620c681083" - integrity sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA== - dependencies: - is-obj "^2.0.0" - dotenv@^16.0.3: version "16.4.5" resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.4.5.tgz#cdd3b3b604cb327e286b4762e13502f717cb099f" @@ -4818,6 +4588,11 @@ dotenv@^8.2.0: resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.6.0.tgz#061af664d19f7f4d8fc6e4ff9b584ce237adcb8b" integrity sha512-IrPdXQsk2BbzvCBGBOTmmSH5SodmqZNt4ERAZDmW4CT+tL8VtvinqywuANaFu4bOMWki16nqf0e4oC0QIaDr/g== +eastasianwidth@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb" + integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA== + ecc-jsbn@~0.1.1: version "0.1.2" resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" @@ -4884,6 +4659,11 @@ emoji-regex@^8.0.0: resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== +emoji-regex@^9.2.2: + version "9.2.2" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" + integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== + encoding-down@^6.3.0: version "6.3.0" resolved "https://registry.yarnpkg.com/encoding-down/-/encoding-down-6.3.0.tgz#b1c4eb0e1728c146ecaef8e32963c549e76d082b" @@ -5295,7 +5075,7 @@ esprima@2.7.x, esprima@^2.7.1: resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581" integrity sha512-OarPfz0lFCiW4/AV2Oy1Rp9qu0iusTKqykwTspGCZtPxmF81JR4MmIebvF1F9+UOKth2ZubLQ4XGGaU+hSn99A== -esprima@^4.0.0, esprima@^4.0.1: +esprima@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== @@ -5492,6 +5272,19 @@ ethers@^5.0.2, ethers@^5.7.0, ethers@^5.7.2, ethers@~5.7.0: "@ethersproject/web" 
"5.7.1" "@ethersproject/wordlists" "5.7.0" +ethers@^6.7.1: + version "6.12.1" + resolved "https://registry.yarnpkg.com/ethers/-/ethers-6.12.1.tgz#517ff6d66d4fd5433e38e903051da3e57c87ff37" + integrity sha512-j6wcVoZf06nqEcBbDWkKg8Fp895SS96dSnTCjiXT+8vt2o02raTn4Lo9ERUuIVU5bAjoPYeA+7ytQFexFmLuVw== + dependencies: + "@adraffy/ens-normalize" "1.10.1" + "@noble/curves" "1.2.0" + "@noble/hashes" "1.3.2" + "@types/node" "18.15.13" + aes-js "4.0.0-beta.5" + tslib "2.4.0" + ws "8.5.0" + ethers@~5.5.0: version "5.5.4" resolved "https://registry.yarnpkg.com/ethers/-/ethers-5.5.4.tgz#e1155b73376a2f5da448e4a33351b57a885f4352" @@ -5716,11 +5509,6 @@ fast-diff@^1.1.2, fast-diff@^1.2.0: resolved "https://registry.yarnpkg.com/fast-diff/-/fast-diff-1.3.0.tgz#ece407fa550a64d638536cd727e129c61616e0f0" integrity sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw== -fast-equals@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/fast-equals/-/fast-equals-5.0.1.tgz#a4eefe3c5d1c0d021aeed0bc10ba5e0c12ee405d" - integrity sha512-WF1Wi8PwwSY7/6Kx0vKXtw8RwuSGoM1bvDaJbu7MxDlR1vovZjIAKrnzyrThgAjm6JDTu0fVgWXDlMGspodfoQ== - fast-glob@^3.0.3, fast-glob@^3.2.12, fast-glob@^3.2.9, fast-glob@^3.3.1, fast-glob@^3.3.2: version "3.3.2" resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129" @@ -5770,13 +5558,6 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -file-entry-cache@^8.0.0: - version "8.0.0" - resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-8.0.0.tgz#7787bddcf1131bffb92636c69457bbc0edd6d81f" - integrity sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ== - dependencies: - flat-cache "^4.0.0" - fill-range@^7.0.1: version "7.0.1" resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" @@ -5799,11 +5580,6 @@ find-replace@^3.0.0: dependencies: array-back "^3.0.1" -find-up-simple@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/find-up-simple/-/find-up-simple-1.0.0.tgz#21d035fde9fdbd56c8f4d2f63f32fd93a1cfc368" - integrity sha512-q7Us7kcjj2VMePAa02hDAF6d+MzsdsAWEwYyOpwUtlerRBkOEPBCRZrAV4XfcSN8fHAgaD0hP7miwoay6DCprw== - find-up@5.0.0, find-up@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" @@ -5836,14 +5612,6 @@ flat-cache@^3.0.4: keyv "^4.5.3" rimraf "^3.0.2" -flat-cache@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-4.0.1.tgz#0ece39fcb14ee012f4b0410bd33dd9c1f011127c" - integrity sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw== - dependencies: - flatted "^3.2.9" - keyv "^4.5.4" - flat@^5.0.2: version "5.0.2" resolved "https://registry.yarnpkg.com/flat/-/flat-5.0.2.tgz#8ca6fe332069ffa9d324c327198c598259ceb241" @@ -5866,6 +5634,14 @@ for-each@^0.3.3: dependencies: is-callable "^1.1.3" +foreground-child@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.1.1.tgz#1d173e776d75d2772fed08efe4a0de1ea1b12d0d" + integrity sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg== + dependencies: + cross-spawn "^7.0.0" + signal-exit "^4.0.1" + forever-agent@~0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" @@ 
-5924,7 +5700,7 @@ fs-extra@^0.30.0: path-is-absolute "^1.0.0" rimraf "^2.2.8" -fs-extra@^11.1.1: +fs-extra@^11.1.1, fs-extra@^11.2.0: version "11.2.0" resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-11.2.0.tgz#e70e17dfad64232287d01929399e0ea7c86b0e5b" integrity sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw== @@ -6013,11 +5789,6 @@ ganache@7.4.3: bufferutil "4.0.5" utf-8-validate "5.0.7" -gensequence@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/gensequence/-/gensequence-7.0.0.tgz#bb6aedec8ff665e3a6c42f92823121e3a6ea7718" - integrity sha512-47Frx13aZh01afHJTB3zTtKIlFI6vWY+MYCN9Qpew6i52rfKjnhCF/l1YlC8UmEMvvntZZ6z4PiCcmyuedR2aQ== - gensync@^1.0.0-beta.2: version "1.0.0-beta.2" resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" @@ -6059,16 +5830,16 @@ get-stdin@=8.0.0: resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-8.0.0.tgz#cbad6a73feb75f6eeb22ba9e01f89aa28aa97a53" integrity sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg== -get-stdin@^9.0.0, get-stdin@~9.0.0: - version "9.0.0" - resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-9.0.0.tgz#3983ff82e03d56f1b2ea0d3e60325f39d703a575" - integrity sha512-dVKBjfWisLAicarI2Sf+JuBE/DghV4UzNAVe9yhEJuzeREd3JhOTE9cUaJTeSa77fsbQUK3pcOpJfM59+VKZaA== - get-stdin@~5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-5.0.1.tgz#122e161591e21ff4c52530305693f20e6393a398" integrity sha512-jZV7n6jGE3Gt7fgSTJoz91Ak5MuTLwMwkoYdjxuJ/AmjIsE1UC03y/IWkZCQGEvVNS9qoRNwy5BCqxImv0FVeA== +get-stdin@~9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-9.0.0.tgz#3983ff82e03d56f1b2ea0d3e60325f39d703a575" + integrity sha512-dVKBjfWisLAicarI2Sf+JuBE/DghV4UzNAVe9yhEJuzeREd3JhOTE9cUaJTeSa77fsbQUK3pcOpJfM59+VKZaA== + get-stream@^6.0.0: version "6.0.1" resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" @@ -6154,6 +5925,17 @@ glob@8.1.0, glob@^8.0.3: minimatch "^5.0.1" once "^1.3.0" +glob@^10.3.10: + version "10.3.16" + resolved "https://registry.yarnpkg.com/glob/-/glob-10.3.16.tgz#bf6679d5d51279c8cfae4febe0d051d2a4bf4c6f" + integrity sha512-JDKXl1DiuuHJ6fVS2FXjownaavciiHNUU4mOvV/B793RLh05vZL1rcPnCSaOgv1hDT6RDlY7AB7ZUvFYAtPgAw== + dependencies: + foreground-child "^3.1.0" + jackspeak "^3.1.2" + minimatch "^9.0.1" + minipass "^7.0.4" + path-scurry "^1.11.0" + glob@^5.0.15: version "5.0.15" resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1" @@ -6188,13 +5970,6 @@ glob@~8.0.3: minimatch "^5.0.1" once "^1.3.0" -global-directory@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/global-directory/-/global-directory-4.0.1.tgz#4d7ac7cfd2cb73f304c53b8810891748df5e361e" - integrity sha512-wHTUcDUoZ1H5/0iVqEudYW4/kAlN5cZ3j/bXn0Dpbizl9iaUVeWSHqiOjsgk6OW2bkLclbBjzewBz6weQ1zA2Q== - dependencies: - ini "4.1.1" - global-modules@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/global-modules/-/global-modules-2.0.0.tgz#997605ad2345f27f51539bea26574421215c7780" @@ -6263,7 +6038,7 @@ gopd@^1.0.1: dependencies: get-intrinsic "^1.1.3" -graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9: +graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@^4.2.0, graceful-fs@^4.2.4, 
 graceful-fs@^4.2.9:
   version "4.2.11"
   resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3"
   integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==
@@ -6331,7 +6106,7 @@ hardhat-typechain@^0.3.3:
   resolved "https://registry.yarnpkg.com/hardhat-typechain/-/hardhat-typechain-0.3.5.tgz#8e50616a9da348b33bd001168c8fda9c66b7b4af"
   integrity sha512-w9lm8sxqTJACY+V7vijiH+NkPExnmtiQEjsV9JKD1KgMdVk2q8y+RhvU/c4B7+7b1+HylRUCxpOIvFuB3rE4+w==
 
-hardhat@=2.22.2, hardhat@^2.18.3:
+hardhat@=2.22.2:
   version "2.22.2"
   resolved "https://registry.yarnpkg.com/hardhat/-/hardhat-2.22.2.tgz#0cadd7ec93bf39bab09f81603e75bc5e92acea3d"
   integrity sha512-0xZ7MdCZ5sJem4MrvpQWLR3R3zGDoHw5lsR+pBFimqwagimIOn3bWuZv69KA+veXClwI1s/zpqgwPwiFrd4Dxw==
@@ -6380,6 +6155,55 @@ hardhat@=2.22.2, hardhat@^2.18.3:
     uuid "^8.3.2"
     ws "^7.4.6"
 
+hardhat@^2.22.5:
+  version "2.22.5"
+  resolved "https://registry.yarnpkg.com/hardhat/-/hardhat-2.22.5.tgz#7e1a4311fa9e34a1cfe337784eae06706f6469a5"
+  integrity sha512-9Zq+HonbXCSy6/a13GY1cgHglQRfh4qkzmj1tpPlhxJDwNVnhxlReV6K7hCWFKlOrV13EQwsdcD0rjcaQKWRZw==
+  dependencies:
+    "@ethersproject/abi" "^5.1.2"
+    "@metamask/eth-sig-util" "^4.0.0"
+    "@nomicfoundation/edr" "^0.4.0"
+    "@nomicfoundation/ethereumjs-common" "4.0.4"
+    "@nomicfoundation/ethereumjs-tx" "5.0.4"
+    "@nomicfoundation/ethereumjs-util" "9.0.4"
+    "@nomicfoundation/solidity-analyzer" "^0.1.0"
+    "@sentry/node" "^5.18.1"
+    "@types/bn.js" "^5.1.0"
+    "@types/lru-cache" "^5.1.0"
+    adm-zip "^0.4.16"
+    aggregate-error "^3.0.0"
+    ansi-escapes "^4.3.0"
+    boxen "^5.1.2"
+    chalk "^2.4.2"
+    chokidar "^3.4.0"
+    ci-info "^2.0.0"
+    debug "^4.1.1"
+    enquirer "^2.3.0"
+    env-paths "^2.2.0"
+    ethereum-cryptography "^1.0.3"
+    ethereumjs-abi "^0.6.8"
+    find-up "^2.1.0"
+    fp-ts "1.19.3"
+    fs-extra "^7.0.1"
+    glob "7.2.0"
+    immutable "^4.0.0-rc.12"
+    io-ts "1.10.4"
+    keccak "^3.0.2"
+    lodash "^4.17.11"
+    mnemonist "^0.38.0"
+    mocha "^10.0.0"
+    p-map "^4.0.0"
+    raw-body "^2.4.1"
+    resolve "1.17.0"
+    semver "^6.3.0"
+    solc "0.7.3"
+    source-map-support "^0.5.13"
+    stacktrace-parser "^0.1.10"
+    tsort "0.0.1"
+    undici "^5.14.0"
+    uuid "^8.3.2"
+    ws "^7.4.6"
+
 has-bigints@^1.0.1, has-bigints@^1.0.2:
   version "1.0.2"
   resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.2.tgz#0871bd3e3d51626f6ca0966668ba35d5602d6eaa"
@@ -6400,11 +6224,6 @@ has-flag@^4.0.0:
   resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
   integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==
 
-has-own-prop@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/has-own-prop/-/has-own-prop-2.0.0.tgz#f0f95d58f65804f5d218db32563bb85b8e0417af"
-  integrity sha512-Pq0h+hvsVm6dDEa8x82GnLSYHOzNDt7f0ddFa3FqcQlgzEiptPqL+XrOJNavjOzSYiYWIrgeVYYgGlLmnxwilQ==
-
 has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.2:
   version "1.0.2"
   resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854"
@@ -6595,11 +6414,6 @@ import-local@^3.0.2:
     pkg-dir "^4.2.0"
     resolve-cwd "^3.0.0"
 
-import-meta-resolve@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/import-meta-resolve/-/import-meta-resolve-4.0.0.tgz#0b1195915689f60ab00f830af0f15cc841e8919e"
-  integrity sha512-okYUR7ZQPH+efeuMJGlq4f8ubUgO50kByRPyt/Cy1Io4PSRsPjxME+YlVaCOx+NIToW7hCsZNFJyTPFFKepRSA==
-
 imurmurhash@^0.1.4:
   version "0.1.4"
   resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea"
@@ -6628,11 +6442,6 @@ inherits@2.0.3:
   resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
   integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==
 
-ini@4.1.1:
-  version "4.1.1"
-  resolved "https://registry.yarnpkg.com/ini/-/ini-4.1.1.tgz#d95b3d843b1e906e56d6747d5447904ff50ce7a1"
-  integrity sha512-QQnnxNyfvmHFIsj7gkPcYymR8Jdw/o7mp5ZFihxn6h8Ci6fh3Dx4E1gPjpQEpIuPo9XVNY/ZUwh4BPMjGyL01g==
-
 ini@^1.3.5, ini@~1.3.0:
   version "1.3.8"
   resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c"
@@ -6803,11 +6612,6 @@ is-number@^7.0.0:
   resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b"
   integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==
 
-is-obj@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-2.0.0.tgz#473fb05d973705e3fd9620545018ca8e22ef4982"
-  integrity sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==
-
 is-path-inside@^3.0.3:
   version "3.0.3"
   resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-3.0.3.tgz#d231362e53a07ff2b0e0ea7fed049161ffd16283"
@@ -6859,7 +6663,7 @@ is-typed-array@^1.1.13:
   dependencies:
     which-typed-array "^1.1.14"
 
-is-typedarray@^1.0.0, is-typedarray@~1.0.0:
+is-typedarray@~1.0.0:
   version "1.0.0"
   resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
   integrity sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==
@@ -6959,6 +6763,15 @@ istanbul-reports@^3.1.3:
     html-escaper "^2.0.0"
     istanbul-lib-report "^3.0.0"
 
+jackspeak@^3.1.2:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/jackspeak/-/jackspeak-3.1.2.tgz#eada67ea949c6b71de50f1b09c92a961897b90ab"
+  integrity sha512-kWmLKn2tRtfYMF/BakihVVRzBKOxz4gJMiL2Rj91WnAB5TPZumSH99R/Yf1qE1u4uRimvCSJfm6hnxohXeEXjQ==
+  dependencies:
+    "@isaacs/cliui" "^8.0.2"
+  optionalDependencies:
+    "@pkgjs/parseargs" "^0.11.0"
+
 jest-changed-files@^29.7.0:
   version "29.7.0"
   resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.7.0.tgz#1c06d07e77c78e1585d020424dedc10d6e17ac3a"
@@ -7541,7 +7354,7 @@ keccak@^3.0.0, keccak@^3.0.2:
     node-gyp-build "^4.2.0"
     readable-stream "^3.6.0"
 
-keyv@^4.5.3, keyv@^4.5.4:
+keyv@^4.5.3:
   version "4.5.4"
   resolved "https://registry.yarnpkg.com/keyv/-/keyv-4.5.4.tgz#a879a99e29452f942439f2a405e3af8b31d4de93"
   integrity sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==
@@ -7845,6 +7658,11 @@ loupe@^2.3.6:
   dependencies:
     get-func-name "^2.0.1"
 
+lru-cache@^10.2.0:
+  version "10.2.2"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.2.2.tgz#48206bc114c1252940c41b25b41af5b545aca878"
+  integrity sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==
+
 lru-cache@^5.1.1:
   version "5.1.1"
   resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920"
@@ -8043,7 +7861,7 @@ micro-ftch@^0.3.1:
   resolved "https://registry.yarnpkg.com/micro-ftch/-/micro-ftch-0.3.1.tgz#6cb83388de4c1f279a034fb0cf96dfc050853c5f"
   integrity sha512-/0LLxhzP0tfiR5hcQebtudP56gUurs2CLkGarnCiB/OqEyUFQ6U3paQi/tgLv0hBJYt2rnr9MNpxz4fiiugstg==
 
-micromatch@^4.0.4, micromatch@^4.0.5:
+micromatch@^4.0.4:
   version "4.0.5"
   resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6"
   integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==
@@ -8133,6 +7951,13 @@ minimatch@^7.4.3:
   dependencies:
     brace-expansion "^2.0.1"
 
+minimatch@^9.0.1, minimatch@^9.0.3:
+  version "9.0.4"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.4.tgz#8e49c731d1749cbec05050ee5145147b32496a51"
+  integrity sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==
+  dependencies:
+    brace-expansion "^2.0.1"
+
 minimatch@~3.0.4:
   version "3.0.8"
   resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.8.tgz#5e6a59bd11e2ab0de1cfb843eb2d82e546c321c1"
@@ -8145,6 +7970,11 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6, minimist@^1.2.8, minimist@~1.
   resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
   integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==
 
+"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.0.4:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.1.tgz#f7f85aff59aa22f110b20e27692465cf3bf89481"
+  integrity sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA==
+
 mkdirp-classic@^0.5.2:
   version "0.5.3"
   resolved "https://registry.yarnpkg.com/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz#fa10c9115cc6d8865be221ba47ee9bed78601113"
@@ -8167,6 +7997,11 @@ mkdirp@^2.1.6:
   resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-2.1.6.tgz#964fbcb12b2d8c5d6fbc62a963ac95a273e2cc19"
   integrity sha512-+hEnITedc8LAtIP9u3HJDFIdcLV2vXP33sqLLIzkv1Db1zO/1OxbvYf0Y1OC/S/Qo5dxHXepofhmxL02PsKe+A==
 
+mkdirp@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-3.0.1.tgz#e44e4c5607fb279c168241713cc6e0fea9adcb50"
+  integrity sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==
+
 mnemonist@^0.38.0:
   version "0.38.5"
   resolved "https://registry.yarnpkg.com/mnemonist/-/mnemonist-0.38.5.tgz#4adc7f4200491237fe0fa689ac0b86539685cade"
@@ -8311,6 +8146,17 @@ nise@^5.1.5:
     just-extend "^6.2.0"
     path-to-regexp "^6.2.1"
 
+nise@^6.0.0:
+  version "6.0.0"
+  resolved "https://registry.yarnpkg.com/nise/-/nise-6.0.0.tgz#ae56fccb5d912037363c3b3f29ebbfa28bde8b48"
+  integrity sha512-K8ePqo9BFvN31HXwEtTNGzgrPpmvgciDsFz8aztFjt4LqKO/JeFD8tBOeuDiCMXrIl/m1YvfH8auSpxfaD09wg==
+  dependencies:
+    "@sinonjs/commons" "^3.0.0"
+    "@sinonjs/fake-timers" "^11.2.2"
+    "@sinonjs/text-encoding" "^0.7.2"
+    just-extend "^6.2.0"
+    path-to-regexp "^6.2.1"
+
 node-addon-api@^2.0.0:
   version "2.0.2"
   resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-2.0.2.tgz#432cfa82962ce494b132e9d72a15b29f71ff5d32"
@@ -8608,13 +8454,6 @@ parent-module@^1.0.0:
   dependencies:
     callsites "^3.0.0"
 
-parent-module@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-2.0.0.tgz#fa71f88ff1a50c27e15d8ff74e0e3a9523bf8708"
-  integrity sha512-uo0Z9JJeWzv8BG+tRcapBKNJ0dro9cLyczGzulS6EfeyAdeC9sbojtW6XwvYxJkEne9En+J2XEl4zyglVeIwFg==
-  dependencies:
-    callsites "^3.1.0"
-
 parse-cache-control@^1.0.1:
   version "1.0.1"
   resolved "https://registry.yarnpkg.com/parse-cache-control/-/parse-cache-control-1.0.1.tgz#8eeab3e54fa56920fe16ba38f77fa21aacc2d74e"
@@ -8673,6 +8512,14 @@ path-parse@^1.0.6, path-parse@^1.0.7:
   resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735"
   integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
 
+path-scurry@^1.11.0:
+  version "1.11.1"
+  resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2"
+  integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==
+  dependencies:
+    lru-cache "^10.2.0"
+    minipass "^5.0.0 || ^6.0.2 || ^7.0.0"
+
 path-to-regexp@^6.2.1:
   version "6.2.2"
   resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-6.2.2.tgz#324377a83e5049cbecadc5554d6a63a9a4866b36"
@@ -9223,11 +9070,6 @@ regexpp@^3.1.0:
   resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-3.2.0.tgz#0425a2768d8f23bad70ca4b90461fa2f1213e1b2"
   integrity sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==
 
-repeat-string@^1.6.1:
-  version "1.6.1"
-  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
-  integrity sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==
-
 req-cwd@^2.0.0:
   version "2.0.0"
   resolved "https://registry.yarnpkg.com/req-cwd/-/req-cwd-2.0.0.tgz#d4082b4d44598036640fb73ddea01ed53db49ebc"
@@ -9512,13 +9354,18 @@ semver@^6.3.0, semver@^6.3.1:
   resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4"
   integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==
 
-semver@^7.2.1, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.5.1, semver@^7.5.2, semver@^7.5.3, semver@^7.5.4, semver@^7.6.0:
+semver@^7.2.1, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.5.1, semver@^7.5.2, semver@^7.5.3, semver@^7.5.4:
   version "7.6.0"
   resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.0.tgz#1a46a4db4bffcccd97b743b5005c8325f23d4e2d"
   integrity sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==
   dependencies:
     lru-cache "^6.0.0"
 
+semver@^7.6.2:
+  version "7.6.2"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.2.tgz#1e3b34759f896e8f14d6134732ce798aeb0c6e13"
+  integrity sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==
+
 serialize-javascript@6.0.0:
   version "6.0.0"
   resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.0.tgz#efae5d88f45d7924141da8b5c3a7a7e663fefeb8"
@@ -9632,6 +9479,11 @@ signal-exit@^3.0.2, signal-exit@^3.0.3, signal-exit@^3.0.7:
   resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9"
   integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==
 
+signal-exit@^4.0.1:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-4.1.0.tgz#952188c1cbd546070e2dd20d0f41c0ae0530cb04"
+  integrity sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==
+
 sinon-chai@^3.7.0:
   version "3.7.0"
   resolved "https://registry.yarnpkg.com/sinon-chai/-/sinon-chai-3.7.0.tgz#cfb7dec1c50990ed18c153f1840721cf13139783"
@@ -9649,6 +9501,18 @@ sinon@^17.0.1:
     nise "^5.1.5"
     supports-color "^7.2.0"
 
+sinon@^18.0.0:
+  version "18.0.0"
+  resolved "https://registry.yarnpkg.com/sinon/-/sinon-18.0.0.tgz#69ca293dbc3e82590a8b0d46c97f63ebc1e5fc01"
+  integrity sha512-+dXDXzD1sBO6HlmZDd7mXZCR/y5ECiEiGCBSGuFD/kZ0bDTofPYc6JaeGmPSF+1j1MejGUWkORbYOLDyvqCWpA==
+  dependencies:
+    "@sinonjs/commons" "^3.0.1"
+    "@sinonjs/fake-timers" "^11.2.2"
+    "@sinonjs/samsam" "^8.0.0"
+    diff "^5.2.0"
+    nise "^6.0.0"
+    supports-color "^7"
+
 sisteransi@^1.0.5:
   version "1.0.5"
   resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed"
@@ -9912,6 +9776,15 @@ string-length@^4.0.1:
     char-regex "^1.0.2"
     strip-ansi "^6.0.0"
 
+"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3:
+  version "4.2.3"
+  resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010"
+  integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
+  dependencies:
+    emoji-regex "^8.0.0"
+    is-fullwidth-code-point "^3.0.0"
+    strip-ansi "^6.0.1"
+
 string-width@^2.1.0, string-width@^2.1.1:
   version "2.1.1"
   resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e"
@@ -9920,14 +9793,14 @@ string-width@^2.1.0, string-width@^2.1.1:
     is-fullwidth-code-point "^2.0.0"
     strip-ansi "^4.0.0"
 
-string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3:
-  version "4.2.3"
-  resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010"
-  integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
+string-width@^5.0.1, string-width@^5.1.2:
+  version "5.1.2"
+  resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794"
+  integrity sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==
   dependencies:
-    emoji-regex "^8.0.0"
-    is-fullwidth-code-point "^3.0.0"
-    strip-ansi "^6.0.1"
+    eastasianwidth "^0.2.0"
+    emoji-regex "^9.2.2"
+    strip-ansi "^7.0.1"
 
 string.prototype.padend@^3.0.0:
   version "3.1.6"
@@ -9986,6 +9859,13 @@ string_decoder@~1.1.1:
   dependencies:
     safe-buffer "~5.1.0"
 
+"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1:
+  version "6.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9"
+  integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==
+  dependencies:
+    ansi-regex "^5.0.1"
+
 strip-ansi@^4.0.0:
   version "4.0.0"
   resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f"
@@ -10000,14 +9880,7 @@ strip-ansi@^5.1.0:
   dependencies:
     ansi-regex "^4.1.0"
 
-strip-ansi@^6.0.0, strip-ansi@^6.0.1:
-  version "6.0.1"
-  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9"
-  integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==
-  dependencies:
-    ansi-regex "^5.0.1"
-
-strip-ansi@^7.1.0:
+strip-ansi@^7.0.1:
   version "7.1.0"
   resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45"
   integrity sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==
@@ -10067,7 +9940,7 @@ supports-color@^5.3.0:
   dependencies:
     has-flag "^3.0.0"
 
-supports-color@^7.1.0, supports-color@^7.2.0:
+supports-color@^7, supports-color@^7.1.0, supports-color@^7.2.0:
   version "7.2.0"
   resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da"
   integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==
@@ -10108,13 +9981,14 @@ synckit@^0.8.6:
   dependencies:
     "@matterlabs/hardhat-zksync-deploy" "^0.6.5"
     "@matterlabs/hardhat-zksync-solc" "^1.1.4"
+    "@matterlabs/hardhat-zksync-verify" "^1.4.3"
     commander "^9.4.1"
     eslint "^8.51.0"
     eslint-plugin-import "^2.29.0"
     eslint-plugin-prettier "^5.0.1"
     ethers "^5.7.0"
     fast-glob "^3.3.2"
-    hardhat "^2.18.3"
+    hardhat "=2.22.2"
     preprocess "^3.2.0"
     zksync-ethers "https://github.com/zksync-sdk/zksync-ethers#ethers-v5-feat/bridgehub"
 
@@ -10368,6 +10242,14 @@ ts-morph@^19.0.0:
     "@ts-morph/common" "~0.20.0"
     code-block-writer "^12.0.0"
 
+ts-morph@^21.0.1:
+  version "21.0.1"
+  resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-21.0.1.tgz#712302a0f6e9dbf1aa8d9cf33a4386c4b18c2006"
+  integrity sha512-dbDtVdEAncKctzrVZ+Nr7kHpHkv+0JDJb2MjjpBaj8bFeCkePU9rHfMklmhuLFnpeq/EJZk2IhStY6NzqgjOkg==
+  dependencies:
+    "@ts-morph/common" "~0.22.0"
+    code-block-writer "^12.0.0"
+
 ts-node@^10.1.0, ts-node@^10.7.0:
   version "10.9.2"
   resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.9.2.tgz#70f021c9e185bccdca820e26dc413805c101c71f"
@@ -10397,6 +10279,11 @@ tsconfig-paths@^3.15.0:
     minimist "^1.2.6"
     strip-bom "^3.0.0"
 
+tslib@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.4.0.tgz#7cecaa7f073ce680a05847aa77be941098f36dc3"
+  integrity sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==
+
 tslib@^1.8.1, tslib@^1.9.0, tslib@^1.9.3:
   version "1.14.1"
   resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00"
@@ -10475,11 +10362,6 @@ type-fest@^0.7.1:
   resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.7.1.tgz#8dda65feaf03ed78f0a3f9678f1869147f7c5c48"
   integrity sha512-Ne2YiiGN8bmrmJJEuTWTLJR32nh/JdL1+PSicowtNb0WFpn59GK8/lfD61bVtzguz7b3PBt74nxpv/Pw5po5Rg==
 
-type-fest@^1.0.1:
-  version "1.4.0"
-  resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-1.4.0.tgz#e9fb813fe3bf1744ec359d55d1affefa76f14be1"
-  integrity sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==
-
 typechain@^4.0.0:
   version "4.0.3"
   resolved "https://registry.yarnpkg.com/typechain/-/typechain-4.0.3.tgz#e8fcd6c984676858c64eeeb155ea783a10b73779"
@@ -10553,13 +10435,6 @@ typed-array-length@^1.0.6:
     is-typed-array "^1.1.13"
     possible-typed-array-names "^1.0.0"
 
-typedarray-to-buffer@^3.1.5:
-  version "3.1.5"
-  resolved "https://registry.yarnpkg.com/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz#a97ee7a9ff42691b9f783ff1bc5112fe3fca9080"
-  integrity sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==
-  dependencies:
-    is-typedarray "^1.0.0"
-
 typedarray@^0.0.6:
   version "0.0.6"
   resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
@@ -10622,12 +10497,10 @@ undici@^5.14.0:
   dependencies:
     "@fastify/busboy" "^2.0.0"
 
-unique-string@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/unique-string/-/unique-string-3.0.0.tgz#84a1c377aff5fd7a8bc6b55d8244b2bd90d75b9a"
-  integrity sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==
-  dependencies:
-    crypto-random-string "^4.0.0"
+undici@^6.18.2:
+  version "6.19.2"
+  resolved "https://registry.yarnpkg.com/undici/-/undici-6.19.2.tgz#231bc5de78d0dafb6260cf454b294576c2f3cd31"
+  integrity sha512-JfjKqIauur3Q6biAtHJ564e3bWa8VvT+7cSiOJHFbX4Erv6CLGDpg8z+Fmg/1OI/47RA+GI2QZaF48SSaLvyBA==
 
 universalify@^0.1.0:
   version "0.1.2"
@@ -10742,16 +10615,6 @@ verror@1.10.0:
     core-util-is "1.0.2"
     extsprintf "^1.2.0"
 
-vscode-languageserver-textdocument@^1.0.11:
-  version "1.0.11"
-  resolved "https://registry.yarnpkg.com/vscode-languageserver-textdocument/-/vscode-languageserver-textdocument-1.0.11.tgz#0822a000e7d4dc083312580d7575fe9e3ba2e2bf"
-  integrity sha512-X+8T3GoiwTVlJbicx/sIAF+yuJAqz8VvwJyoMVhwEMoEKE/fkDmrqUgDMyBECcM2A2frVZIUj5HI/ErRXCfOeA==
-
-vscode-uri@^3.0.8:
-  version "3.0.8"
-  resolved "https://registry.yarnpkg.com/vscode-uri/-/vscode-uri-3.0.8.tgz#1770938d3e72588659a172d0fd4642780083ff9f"
-  integrity sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==
-
 walker@^1.0.8:
   version "1.0.8"
   resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f"
@@ -10862,7 +10725,7 @@ workerpool@6.2.1:
   resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343"
   integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw==
 
-wrap-ansi@^7.0.0:
+"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0:
   version "7.0.0"
   resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43"
   integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==
@@ -10871,21 +10734,20 @@ wrap-ansi@^7.0.0:
     string-width "^4.1.0"
     strip-ansi "^6.0.0"
 
+wrap-ansi@^8.1.0:
+  version "8.1.0"
+  resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz#56dc22368ee570face1b49819975d9b9a5ead214"
+  integrity sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==
+  dependencies:
+    ansi-styles "^6.1.0"
+    string-width "^5.0.1"
+    strip-ansi "^7.0.1"
+
 wrappy@1:
   version "1.0.2"
   resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
   integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==
 
-write-file-atomic@^3.0.3:
-  version "3.0.3"
-  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-3.0.3.tgz#56bd5c5a5c70481cd19c571bd39ab965a5de56e8"
-  integrity sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==
-  dependencies:
-    imurmurhash "^0.1.4"
-    is-typedarray "^1.0.0"
-    signal-exit "^3.0.2"
-    typedarray-to-buffer "^3.1.5"
-
 write-file-atomic@^4.0.2:
   version "4.0.2"
   resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-4.0.2.tgz#a9df01ae5b77858a027fd2e80768ee433555fcfd"
@@ -10899,16 +10761,16 @@ ws@7.4.6:
   resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.6.tgz#5654ca8ecdeee47c33a9a4bf6d28e2be2980377c"
   integrity sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A==
 
+ws@8.5.0:
+  version "8.5.0"
"https://registry.yarnpkg.com/ws/-/ws-8.5.0.tgz#bfb4be96600757fe5382de12c670dab984a1ed4f" + integrity sha512-BWX0SWVgLPzYwF8lTzEy1egjhS4S4OEAHfsO8o65WOVsrnSRGaSiUaa9e0ggGlkMTtBlmOpEXiie9RUcBO86qg== + ws@^7.4.6: version "7.5.9" resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.9.tgz#54fa7db29f4c7cec68b1ddd3a89de099942bb591" integrity sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q== -xdg-basedir@^5.0.1: - version "5.1.0" - resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-5.1.0.tgz#1efba19425e73be1bc6f2a6ceb52a3d2c884c0c9" - integrity sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ== - xhr2@0.1.3: version "0.1.3" resolved "https://registry.yarnpkg.com/xhr2/-/xhr2-0.1.3.tgz#cbfc4759a69b4a888e78cf4f20b051038757bd11" @@ -10939,11 +10801,6 @@ yallist@^4.0.0: resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== -yaml@^2.4.1: - version "2.4.1" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.4.1.tgz#2e57e0b5e995292c25c75d2658f0664765210eed" - integrity sha512-pIXzoImaqmfOrL7teGUBt/T7ZDnyeGBWyXQBvOVhLkWLN37GXv8NMLK406UY6dS51JfcQHsmcW5cJ441bHg6Lg== - yaml@^2.4.2: version "2.4.2" resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.4.2.tgz#7a2b30f2243a5fc299e1f14ca58d475ed4bc5362" @@ -11022,6 +10879,11 @@ zksync-ethers@5.8.0-beta.5: dependencies: ethers "~5.7.0" +zksync-ethers@^6.9.0: + version "6.9.0" + resolved "https://registry.yarnpkg.com/zksync-ethers/-/zksync-ethers-6.9.0.tgz#efaff1d59e2cff837eeda84c4ba59fdca4972a91" + integrity sha512-2CppwvLHtz689L7E9EhevbFtsqVukKC/lVicwdeUS2yqV46ET4iBR11rYdEfGW2oEo1h6yJuuwIBDFm2SybkIA== + "zksync-ethers@https://github.com/zksync-sdk/zksync-ethers#ethers-v5-feat/bridgehub": version "5.1.0" resolved "https://github.com/zksync-sdk/zksync-ethers#28ccbe7d67b170c202b17475e06a82002e6e3acc" diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 7679313e9d68..9d738fdf7231 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -14,9 +14,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] @@ -53,9 +53,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -66,49 +66,65 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "anstream" 
-version = "0.6.12" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -116,9 +132,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.82" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "arrayvec" @@ -137,13 +153,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] @@ -166,6 +182,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "auto_impl" version = "1.2.0" @@ -174,20 +196,65 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" + +[[package]] +name = "axum" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +dependencies = [ + "async-trait", + "axum-core", + "bitflags 1.3.2", + "bytes", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.29", + "itoa", + "matchit", + 
"memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 0.1.2", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", @@ -198,6 +265,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base16ct" version = "0.2.0" @@ -216,6 +289,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "base64ct" version = "1.6.0" @@ -228,6 +307,23 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" +[[package]] +name = "beef" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" + +[[package]] +name = "bigdecimal" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + [[package]] name = "bit-set" version = "0.5.3" @@ -251,9 +347,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" dependencies = [ "serde", ] @@ -270,6 +366,15 @@ dependencies = [ "wyz", ] +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -281,9 +386,9 @@ dependencies = [ [[package]] name = "bs58" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ "sha2", "tinyvec", @@ -291,9 +396,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = 
"3.15.3" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byte-slice-cast" @@ -309,9 +414,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" dependencies = [ "serde", ] @@ -339,18 +444,18 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" dependencies = [ "serde", ] [[package]] name = "cargo-platform" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "694c8807f2ae16faecc43dc17d74b3eb042482789fd0eb64b39a2e04e087053f" +checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" dependencies = [ "serde", ] @@ -371,11 +476,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.88" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02f341c093d19155a6e41631ce5971aac4e9a868262212153124c15fa22d1cdc" +checksum = "74b6a57f98764a267ff415d50a25e6e166f3831a5071af4995296ea97d210490" dependencies = [ + "jobserver", "libc", + "once_cell", ] [[package]] @@ -386,11 +493,17 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.34" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", "num-traits", + "serde", + "wasm-bindgen", + "windows-targets 0.52.5", ] [[package]] @@ -405,9 +518,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.1" +version = "4.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da" +checksum = "84b3edb18336f4df585bc9aa31dd99c036dfa5dc5e9a2939a722a188f3a8970d" dependencies = [ "clap_builder", "clap_derive", @@ -415,34 +528,34 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.1" +version = "4.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb" +checksum = "c1c09dd5ada6c6c78075d6fd0da3f90d8080651e2d6cc8eb2f1aaa4034ced708" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim", + "strsim 0.11.1", "terminal_size", ] [[package]] name = "clap_derive" -version = "4.5.0" +version = "4.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +checksum = "2bac35c6dafb060fd4d275d9a4ffae97917c13a6327903a8be2153cd964f7085" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] name = 
"clap_lex" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" [[package]] name = "cliclack" @@ -467,7 +580,7 @@ dependencies = [ "coins-core", "digest", "hmac", - "k256", + "k256 0.13.3", "serde", "sha2", "thiserror", @@ -511,9 +624,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "common" @@ -530,13 +643,20 @@ dependencies = [ "serde_json", "serde_yaml", "sqlx", - "strum_macros 0.26.2", + "strum_macros 0.26.4", + "thiserror", "tokio", "toml", "url", "xshell", ] +[[package]] +name = "compile-fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bed69047ed42e52c7e38d6421eeb8ceefb4f2a2b52eed59137f7bad7908f6800" + [[package]] name = "config" version = "0.1.0" @@ -545,16 +665,17 @@ dependencies = [ "clap", "common", "ethers", - "path-absolutize", "rand", "serde", "serde_json", - "strum 0.26.2", - "strum_macros 0.26.2", + "strum 0.26.3", + "strum_macros 0.26.4", "thiserror", "types", "url", "xshell", + "zksync_config", + "zksync_protobuf_config", ] [[package]] @@ -572,9 +693,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.11.3" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ba00838774b4ab0233e355d26710fbfc8327a05c017f6dc4873f876d1f79f78" +checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" dependencies = [ "cfg-if", "cpufeatures", @@ -637,13 +758,22 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-deque" version = "0.8.5" @@ -674,9 +804,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" @@ -684,6 +814,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array", + "rand_core", + "subtle", + "zeroize", +] + [[package]] name = "crypto-bigint" version = "0.5.5" @@ -715,17 +857,72 @@ dependencies = [ "cipher", ] 
+[[package]] +name = "darling" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" +dependencies = [ + "darling_core", + "quote", + "syn 1.0.109", +] + [[package]] name = "data-encoding" -version = "2.5.0" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" + +[[package]] +name = "debugid" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" +dependencies = [ + "serde", + "uuid 1.9.1", +] + +[[package]] +name = "der" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid", + "zeroize", +] [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "pem-rfc7468", @@ -743,20 +940,41 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.17" +version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.68", ] [[package]] -name = "diff" -version = "0.1.13" +name = "derive_more" +version = "1.0.0-beta.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" +checksum = "f7abbfc297053be59290e3152f8cbcd52c8642e0728b69ee187d991d4c1af08d" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0-beta.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bba3e9872d7c58ce7ef0fcf1844fcc3e23ef2a58377b50df35dd98e42a5726e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", + "unicode-xid", +] + +[[package]] +name = "deunicode" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339544cc9e2c4dc3fc7149fd630c5f22263a4fdf18a98afd0075784968b5cf00" [[package]] name = "digest" @@ -818,59 +1036,106 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "dtoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" + [[package]] name = "dunce" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "ecdsa" +version = "0.14.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", + "signature 1.6.4", +] + [[package]] name = "ecdsa" version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der", + "der 0.7.9", "digest", - "elliptic-curve", - "rfc6979", - "signature", - "spki", + "elliptic-curve 0.13.8", + "rfc6979 0.4.0", + "signature 2.2.0", + "spki 0.7.3", ] [[package]] name = "either" -version = "1.10.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" dependencies = [ "serde", ] +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct 0.1.1", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest", + "ff 0.12.1", + "generic-array", + "group 0.12.1", + "pkcs8 0.9.0", + "rand_core", + "sec1 0.3.0", + "subtle", + "zeroize", +] + [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct", - "crypto-bigint", + "base16ct 0.2.0", + "crypto-bigint 0.5.5", "digest", - "ff", + "ff 0.13.0", "generic-array", - "group", - "pkcs8", + "group 0.13.0", + "pkcs8 0.10.2", "rand_core", - "sec1", + "sec1 0.7.3", "subtle", "zeroize", ] +[[package]] +name = "elsa" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d98e71ae4df57d214182a2e5cb90230c0192c6ddfcaa05c36453d46a54713e10" +dependencies = [ + "stable_deref_trait", +] + [[package]] name = "ena" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c533630cf40e9caa44bd91aadc88a75d75a4c3a12b4cfde353cbed41daa1e1f1" +checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" dependencies = [ "log", ] @@ -883,23 +1148,23 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] [[package]] name = "enr" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" +checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" dependencies = [ "base64 0.21.7", "bytes", "hex", - "k256", + "k256 0.13.3", "log", "rand", "rlp", @@ -908,6 +1173,15 @@ 
+[[package]]
+name = "darling"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c"
+dependencies = [
+ "darling_core",
+ "darling_macro",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610"
+dependencies = [
+ "fnv",
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim 0.10.0",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "darling_macro"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
+dependencies = [
+ "darling_core",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "data-encoding"
-version = "2.5.0"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
+
+[[package]]
+name = "debugid"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d"
+dependencies = [
+ "serde",
+ "uuid 1.9.1",
+]
+
+[[package]]
+name = "der"
+version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5"
+checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de"
+dependencies = [
+ "const-oid",
+ "zeroize",
+]
 
 [[package]]
 name = "der"
-version = "0.7.8"
+version = "0.7.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c"
+checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0"
 dependencies = [
  "const-oid",
  "pem-rfc7468",
@@ -743,20 +940,41 @@ dependencies = [
 
 [[package]]
 name = "derive_more"
-version = "0.99.17"
+version = "0.99.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
+checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 1.0.109",
+ "syn 2.0.68",
 ]
 
 [[package]]
-name = "diff"
-version = "0.1.13"
+name = "derive_more"
+version = "1.0.0-beta.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
+checksum = "f7abbfc297053be59290e3152f8cbcd52c8642e0728b69ee187d991d4c1af08d"
+dependencies = [
+ "derive_more-impl",
+]
+
+[[package]]
+name = "derive_more-impl"
+version = "1.0.0-beta.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bba3e9872d7c58ce7ef0fcf1844fcc3e23ef2a58377b50df35dd98e42a5726e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.68",
+ "unicode-xid",
+]
+
+[[package]]
+name = "deunicode"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "339544cc9e2c4dc3fc7149fd630c5f22263a4fdf18a98afd0075784968b5cf00"
 
 [[package]]
 name = "digest"
@@ -818,59 +1036,106 @@ version = "0.15.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"
 
+[[package]]
+name = "dtoa"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653"
+
 [[package]]
 name = "dunce"
 version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b"
 
+[[package]]
+name = "ecdsa"
+version = "0.14.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c"
+dependencies = [
+ "der 0.6.1",
+ "elliptic-curve 0.12.3",
+ "rfc6979 0.3.1",
+ "signature 1.6.4",
+]
+
 [[package]]
 name = "ecdsa"
 version = "0.16.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca"
 dependencies = [
- "der",
+ "der 0.7.9",
  "digest",
- "elliptic-curve",
- "rfc6979",
- "signature",
- "spki",
+ "elliptic-curve 0.13.8",
+ "rfc6979 0.4.0",
+ "signature 2.2.0",
+ "spki 0.7.3",
 ]
 
 [[package]]
 name = "either"
-version = "1.10.0"
+version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a"
+checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
 dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "elliptic-curve"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3"
+dependencies = [
+ "base16ct 0.1.1",
+ "crypto-bigint 0.4.9",
+ "der 0.6.1",
+ "digest",
+ "ff 0.12.1",
+ "generic-array",
+ "group 0.12.1",
+ "pkcs8 0.9.0",
+ "rand_core",
+ "sec1 0.3.0",
+ "subtle",
+ "zeroize",
+]
+
 [[package]]
 name = "elliptic-curve"
 version = "0.13.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47"
 dependencies = [
- "base16ct",
- "crypto-bigint",
+ "base16ct 0.2.0",
+ "crypto-bigint 0.5.5",
  "digest",
- "ff",
+ "ff 0.13.0",
  "generic-array",
- "group",
- "pkcs8",
+ "group 0.13.0",
+ "pkcs8 0.10.2",
  "rand_core",
- "sec1",
+ "sec1 0.7.3",
  "subtle",
  "zeroize",
 ]
 
+[[package]]
+name = "elsa"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d98e71ae4df57d214182a2e5cb90230c0192c6ddfcaa05c36453d46a54713e10"
+dependencies = [
+ "stable_deref_trait",
+]
+
 [[package]]
 name = "ena"
-version = "0.14.2"
+version = "0.14.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c533630cf40e9caa44bd91aadc88a75d75a4c3a12b4cfde353cbed41daa1e1f1"
+checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5"
 dependencies = [
  "log",
 ]
@@ -883,23 +1148,23 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
 
 [[package]]
 name = "encoding_rs"
-version = "0.8.33"
+version = "0.8.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1"
+checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59"
 dependencies = [
  "cfg-if",
 ]
 
 [[package]]
 name = "enr"
-version = "0.9.1"
+version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b"
+checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4"
 dependencies = [
  "base64 0.21.7",
  "bytes",
  "hex",
- "k256",
+ "k256 0.13.3",
  "log",
  "rand",
  "rlp",
@@ -908,6 +1173,15 @@ dependencies = [
  "zeroize",
 ]
 
+[[package]]
+name = "envy"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f47e0157f2cb54f5ae1bd371b30a2ae4311e1c028f575cd4e81de7353215965"
+dependencies = [
+ "serde",
+]
+
 [[package]]
 name = "equivalent"
 version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
 
 [[package]]
 name = "errno"
-version = "0.3.8"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
+checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba"
 dependencies = [
  "libc",
  "windows-sys 0.52.0",
 ]
@@ -1007,9 +1281,9 @@ dependencies = [
 
 [[package]]
 name = "ethers"
-version = "2.0.13"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c7cd562832e2ff584fa844cd2f6e5d4f35bbe11b28c7c9b8df957b2e1d0c701"
+checksum = "816841ea989f0c69e459af1cf23a6b0033b19a55424a1ea3a30099becdb8dec0"
 dependencies = [
  "ethers-addressbook",
  "ethers-contract",
@@ -1023,9 +1297,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-addressbook"
-version = "2.0.13"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35dc9a249c066d17e8947ff52a4116406163cf92c7f0763cb8c001760b26403f"
+checksum = "5495afd16b4faa556c3bba1f21b98b4983e53c1755022377051a975c3b021759"
 dependencies = [
  "ethers-core",
  "once_cell",
@@ -1035,9 +1309,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-contract"
-version = "2.0.13"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43304317c7f776876e47f2f637859f6d0701c1ec7930a150f169d5fbe7d76f5a"
+checksum = "6fceafa3578c836eeb874af87abacfb041f92b4da0a78a5edd042564b8ecdaaa"
 dependencies = [
  "const-hex",
  "ethers-contract-abigen",
@@ -1054,9 +1328,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-contract-abigen"
-version = "2.0.13"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9f96502317bf34f6d71a3e3d270defaa9485d754d789e15a8e04a84161c95eb"
+checksum = "04ba01fbc2331a38c429eb95d4a570166781f14290ef9fdb144278a90b5a739b"
 dependencies = [
  "Inflector",
  "const-hex",
@@ -1068,19 +1342,19 @@ dependencies = [
  "proc-macro2",
  "quote",
  "regex",
- "reqwest",
+ "reqwest 0.11.27",
  "serde",
  "serde_json",
- "syn 2.0.51",
+ "syn 2.0.68",
  "toml",
  "walkdir",
 ]
 
 [[package]]
 name = "ethers-contract-derive"
-version = "2.0.13"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "452ff6b0a64507ce8d67ffd48b1da3b42f03680dcf5382244e9c93822cbbf5de"
+checksum = "87689dcabc0051cde10caaade298f9e9093d65f6125c14575db3fd8c669a168f"
 dependencies = [
  "Inflector",
  "const-hex",
@@ -1089,33 +1363,33 @@ dependencies = [
  "proc-macro2",
  "quote",
  "serde_json",
- "syn 2.0.51",
+ "syn 2.0.68",
 ]
 
 [[package]]
 name = "ethers-core"
-version = "2.0.13"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aab3cef6cc1c9fd7f787043c81ad3052eff2b96a3878ef1526aa446311bdbfc9"
+checksum = "82d80cc6ad30b14a48ab786523af33b37f28a8623fc06afd55324816ef18fb1f"
 dependencies = [
  "arrayvec",
  "bytes",
  "cargo_metadata",
  "chrono",
  "const-hex",
- "elliptic-curve",
+ "elliptic-curve 0.13.8",
  "ethabi",
  "generic-array",
- "k256",
- "num_enum",
+ "k256 0.13.3",
+ "num_enum 0.7.2",
  "once_cell",
  "open-fastrlp",
  "rand",
  "rlp",
  "serde",
  "serde_json",
- "strum 0.25.0",
- "syn 2.0.51",
+ "strum 0.26.3",
+ "syn 2.0.68",
  "tempfile",
  "thiserror",
  "tiny-keccak",
@@ -1124,13 +1398,13 @@ dependencies = [
 
 [[package]]
 name = "ethers-etherscan"
-version = "2.0.13"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16d45b981f5fa769e1d0343ebc2a44cfa88c9bc312eb681b676318b40cef6fb1"
+checksum = "e79e5973c26d4baf0ce55520bd732314328cabe53193286671b47144145b9649"
 dependencies = [
  "chrono",
  "ethers-core",
- "reqwest",
+ "reqwest 0.11.27",
  "semver",
  "serde",
  "serde_json",
@@ -1140,9 +1414,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-middleware"
-version = "2.0.13"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "145211f34342487ef83a597c1e69f0d3e01512217a7c72cc8a25931854c7dca0"
+checksum = "48f9fdf09aec667c099909d91908d5eaf9be1bd0e2500ba4172c1d28bfaa43de"
 dependencies = [
  "async-trait",
  "auto_impl",
@@ -1155,7 +1429,7 @@ dependencies = [
  "futures-locks",
  "futures-util",
  "instant",
- "reqwest",
+ "reqwest 0.11.27",
  "serde",
  "serde_json",
  "thiserror",
@@ -1167,9 +1441,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-providers"
-version = "2.0.13"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb6b15393996e3b8a78ef1332d6483c11d839042c17be58decc92fa8b1c3508a"
+checksum = "6434c9a33891f1effc9c75472e12666db2fa5a0fec4b29af6221680a6fe83ab2"
 dependencies = [
  "async-trait",
  "auto_impl",
@@ -1182,12 +1456,12 @@ dependencies = [
  "futures-timer",
  "futures-util",
  "hashers",
- "http",
+ "http 0.2.12",
  "instant",
  "jsonwebtoken",
  "once_cell",
  "pin-project",
- "reqwest",
+ "reqwest 0.11.27",
  "serde",
  "serde_json",
  "thiserror",
@@ -1204,15 +1478,15 @@ dependencies = [
 
 [[package]]
 name = "ethers-signers"
-version = "2.0.13"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3b125a103b56aef008af5d5fb48191984aa326b50bfd2557d231dc499833de3"
+checksum = "228875491c782ad851773b652dd8ecac62cda8571d3bc32a5853644dd26766c2"
 dependencies = [
  "async-trait",
  "coins-bip32",
  "coins-bip39",
  "const-hex",
- "elliptic-curve",
+ "elliptic-curve 0.13.8",
  "eth-keystore",
  "ethers-core",
  "rand",
@@ -1223,9 +1497,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-solc"
-version = "2.0.13"
+version = "2.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d21df08582e0a43005018a858cc9b465c5fff9cf4056651be64f844e57d1f55f"
+checksum = "66244a771d9163282646dbeffe0e6eca4dda4146b6498644e678ac6089b11edd"
 dependencies = [
  "cfg-if",
  "const-hex",
@@ -1271,9 +1545,19 @@ dependencies = [
 
 [[package]]
 name = "fastrand"
-version = "2.0.1"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a"
+
+[[package]]
+name = "ff"
+version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"
+checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160"
+dependencies = [
+ "rand_core",
+ "subtle",
+]
 
 [[package]]
 name = "ff"
@@ -1286,10 +1570,16 @@ dependencies = [
 ]
 
 [[package]]
-name = "finl_unicode"
-version = "1.2.0"
+name = "findshlibs"
+version = "0.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6"
+checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64"
+dependencies = [
+ "cc",
+ "lazy_static",
+ "libc",
+ "winapi",
+]
 
 [[package]]
 name = "fixed-hash"
@@ -1311,9 +1601,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
 
 [[package]]
 name = "flate2"
-version = "1.0.28"
+version = "1.0.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e"
+checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae"
 dependencies = [
  "crc32fast",
  "miniz_oxide",
@@ -1336,6 +1626,21 @@ version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
 
+[[package]]
+name = "foreign-types"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
+dependencies = [
+ "foreign-types-shared",
+]
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
+
 [[package]]
 name = "form_urlencoded"
 version = "1.2.1"
@@ -1438,7 +1743,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.51",
+ "syn 2.0.68",
 ]
 
@@ -1503,9 +1808,9 @@ dependencies = [
 
 [[package]]
 name = "getrandom"
-version = "0.2.12"
+version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5"
+checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
 dependencies = [
  "cfg-if",
  "libc",
@@ -1514,9 +1819,9 @@ dependencies = [
 
 [[package]]
 name = "gimli"
-version = "0.28.1"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
+checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd"
 
 [[package]]
 name = "glob"
@@ -1536,13 +1841,24 @@ dependencies = [
  "wasm-bindgen",
 ]
 
+[[package]]
+name = "group"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7"
+dependencies = [
+ "ff 0.12.1",
+ "rand_core",
+ "subtle",
+]
+
 [[package]]
 name = "group"
 version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63"
 dependencies = [
- "ff",
+ "ff 0.13.0",
  "rand_core",
  "subtle",
 ]
@@ -1558,8 +1874,8 @@ dependencies = [
  "futures-core",
  "futures-sink",
  "futures-util",
- "http",
- "indexmap",
+ "http 0.2.12",
+ "indexmap 2.2.6",
  "slab",
  "tokio",
  "tokio-util",
@@ -1567,12 +1883,37 @@ dependencies = [
 ]
 
 [[package]]
-name = "hashbrown"
-version = "0.14.3"
+name = "h2"
+version = "0.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
+checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab"
 dependencies = [
- "ahash",
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http 1.1.0",
+ "indexmap 2.2.6",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "hashbrown"
+version = "0.14.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
+dependencies = [
+ "ahash",
  "allocator-api2",
 ]
 
@@ -1591,7 +1932,7 @@ version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
 dependencies = [
- "hashbrown",
+ "hashbrown 0.14.5",
 ]
 
@@ -1603,11 +1944,17 @@ dependencies = [
  "unicode-segmentation",
 ]
 
+[[package]]
+name = "heck"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
+
 [[package]]
 name = "hermit-abi"
-version = "0.3.8"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "379dada1584ad501b383485dd706b8afb7a70fcbc7f4da7d780638a5a6124a60"
+checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
 
 [[package]]
 name = "hex"
@@ -1642,11 +1989,33 @@ dependencies = [
  "windows-sys 0.52.0",
 ]
 
+[[package]]
+name = "hostname"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
+dependencies = [
+ "libc",
+ "match_cfg",
+ "winapi",
+]
+
 [[package]]
 name = "http"
-version = "0.2.11"
+version = "0.2.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
+[[package]]
+name = "http"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb"
+checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
 dependencies = [
  "bytes",
  "fnv",
@@ -1660,15 +2029,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
 dependencies = [
  "bytes",
- "http",
+ "http 0.2.12",
  "pin-project-lite",
 ]
 
+[[package]]
+name = "http-body"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643"
+dependencies = [
+ "bytes",
+ "http 1.1.0",
+]
+
+[[package]]
+name = "http-body-util"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
+dependencies = [
+ "bytes",
+ "futures-util",
+ "http 1.1.0",
+ "http-body 1.0.0",
+ "pin-project-lite",
+]
+
 [[package]]
 name = "httparse"
-version = "1.8.0"
+version = "1.9.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"
+checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9"
 
 [[package]]
 name = "httpdate"
@@ -1689,22 +2081,22 @@ dependencies = [
  "serde",
  "serde_derive",
  "toml",
- "uuid 1.8.0",
+ "uuid 1.9.1",
 ]
 
 [[package]]
 name = "hyper"
-version = "0.14.28"
+version = "0.14.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80"
+checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33"
 dependencies = [
  "bytes",
  "futures-channel",
  "futures-core",
  "futures-util",
- "h2",
- "http",
- "http-body",
+ "h2 0.3.26",
+ "http 0.2.12",
+ "http-body 0.4.6",
  "httparse",
  "httpdate",
  "itoa",
@@ -1716,6 +2108,26 @@ dependencies = [
  "want",
 ]
 
+[[package]]
+name = "hyper"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4fe55fb7a772d59a5ff1dfbff4fe0258d19b89fec4b233e75d35d5d2316badc"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "h2 0.4.5",
+ "http 1.1.0",
+ "http-body 1.0.0",
+ "httparse",
+ "itoa",
+ "pin-project-lite",
+ "smallvec",
+ "tokio",
+ "want",
+]
+
 [[package]]
 name = "hyper-rustls"
 version = "0.24.2"
@@ -1723,13 +2135,120 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
 dependencies = [
  "futures-util",
- "http",
- "hyper",
- "rustls",
+ "http 0.2.12",
+ "hyper 0.14.29",
+ "rustls 0.21.12",
+ "tokio",
+ "tokio-rustls 0.24.1",
+]
+
+[[package]]
+name = "hyper-rustls"
+version = "0.27.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155"
+dependencies = [
+ "futures-util",
+ "http 1.1.0",
+ "hyper 1.4.0",
+ "hyper-util",
+ "rustls 0.23.10",
+ "rustls-pki-types",
+ "tokio",
+ "tokio-rustls 0.26.0",
+ "tower-service",
+]
+
+[[package]]
+name = "hyper-timeout"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
+dependencies = [
+ "hyper 0.14.29",
+ "pin-project-lite",
+ "tokio",
+ "tokio-io-timeout",
+]
+
+[[package]]
+name = "hyper-tls"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
+dependencies = [
+ "bytes",
+ "hyper 0.14.29",
+ "native-tls",
+ "tokio",
+ "tokio-native-tls",
+]
+
+[[package]]
+name = "hyper-tls"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0"
+dependencies = [
+ "bytes",
+ "http-body-util",
+ "hyper 1.4.0",
+ "hyper-util",
+ "native-tls",
+ "tokio",
+ "tokio-native-tls",
+ "tower-service",
+]
+
+[[package]]
+name = "hyper-util"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "http 1.1.0",
+ "http-body 1.0.0",
+ "hyper 1.4.0",
+ "pin-project-lite",
+ "socket2",
  "tokio",
- "tokio-rustls",
+ "tower",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "iana-time-zone"
+version = "0.1.60"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "iana-time-zone-haiku",
+ "js-sys",
+ "wasm-bindgen",
+ "windows-core",
+]
+
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
+dependencies = [
+ "cc",
 ]
 
+[[package]]
+name = "ident_case"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
+
 [[package]]
 name = "idna"
 version = "0.5.0"
@@ -1786,12 +2305,22 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683"
 
 [[package]]
 name = "indexmap"
-version = "2.2.3"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
+dependencies = [
+ "autocfg",
+ "hashbrown 0.12.3",
+]
+
+[[package]]
+name = "indexmap"
+version = "2.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177"
+checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26"
 dependencies = [
  "equivalent",
- "hashbrown",
+ "hashbrown 0.14.5",
 ]
 
@@ -1818,9 +2347,9 @@ dependencies = [
 
 [[package]]
 name = "instant"
-version = "0.1.12"
+version = "0.1.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
+checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
 dependencies = [
  "cfg-if",
 ]
@@ -1832,15 +2361,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"
 
 [[package]]
-name = "is-terminal"
-version = "0.4.12"
+name = "is_terminal_polyfill"
+version = "1.70.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b"
-dependencies = [
- "hermit-abi",
- "libc",
- "windows-sys 0.52.0",
-]
+checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800"
 
 [[package]]
 name = "itertools"
@@ -1871,15 +2395,24 @@ dependencies = [
 
 [[package]]
 name = "itoa"
-version = "1.0.10"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+
+[[package]]
+name = "jobserver"
+version = "0.1.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
+checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e"
+dependencies = [
+ "libc",
+]
 
 [[package]]
 name = "js-sys"
-version = "0.3.68"
+version = "0.3.69"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee"
+checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d"
 dependencies = [
  "wasm-bindgen",
 ]
@@ -1900,16 +2433,28 @@ dependencies = [
 
 [[package]]
 name = "k256"
-version = "0.13.1"
+version = "0.11.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b"
+dependencies = [
+ "cfg-if",
+ "ecdsa 0.14.8",
+ "elliptic-curve 0.12.3",
+ "sha2",
+]
+
+[[package]]
+name = "k256"
+version = "0.13.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc"
+checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b"
 dependencies = [
  "cfg-if",
- "ecdsa",
- "elliptic-curve",
+ "ecdsa 0.16.9",
+ "elliptic-curve 0.13.8",
  "once_cell",
  "sha2",
- "signature",
+ "signature 2.2.0",
 ]
 
 [[package]]
@@ -1923,46 +2468,48 @@ dependencies = [
 
 [[package]]
 name = "lalrpop"
-version = "0.20.0"
+version = "0.20.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da4081d44f4611b66c6dd725e6de3169f9f63905421e8626fcb86b6a898998b8"
+checksum = "55cb077ad656299f160924eb2912aa147d7339ea7d69e1b5517326fdcec3c1ca"
 dependencies = [
  "ascii-canvas",
  "bit-set",
- "diff",
  "ena",
- "is-terminal",
- "itertools 0.10.5",
+ "itertools 0.11.0",
  "lalrpop-util",
  "petgraph",
  "regex",
- "regex-syntax 0.7.5",
+ "regex-syntax 0.8.4",
  "string_cache",
  "term",
  "tiny-keccak",
  "unicode-xid",
+ "walkdir",
 ]
 
 [[package]]
 name = "lalrpop-util"
-version = "0.20.0"
+version = "0.20.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f35c735096c0293d313e8f2a641627472b83d01b937177fe76e5e2708d31e0d"
+checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553"
+dependencies = [
+ "regex-automata 0.4.7",
+]
 
 [[package]]
 name = "lazy_static"
-version = "1.4.0"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
 dependencies = [
- "spin 0.5.2",
+ "spin 0.9.8",
 ]
 
 [[package]]
 name = "libc"
-version = "0.2.153"
+version = "0.2.155"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
+checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
 
 [[package]]
 name = "libm"
@@ -1972,13 +2519,12 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
 
 [[package]]
 name = "libredox"
-version = "0.0.1"
+version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8"
+checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
 dependencies = [
- "bitflags 2.4.2",
+ "bitflags 2.6.0",
  "libc",
- "redox_syscall",
 ]
 
@@ -1992,17 +2538,37 @@ dependencies = [
  "vcpkg",
 ]
 
+[[package]]
+name = "linkme"
+version = "0.3.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccb76662d78edc9f9bf56360d6919bdacc8b7761227727e5082f128eeb90bbf5"
+dependencies = [
+ "linkme-impl",
+]
+
+[[package]]
+name = "linkme-impl"
+version = "0.3.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8dccda732e04fa3baf2e17cf835bfe2601c7c2edafd64417c627dabae3a8cda"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.68",
+]
+
 [[package]]
 name = "linux-raw-sys"
-version = "0.4.13"
+version = "0.4.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
+checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
 
 [[package]]
 name = "lock_api"
-version = "0.4.11"
+version = "0.4.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"
+checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
 dependencies = [
  "autocfg",
  "scopeguard",
@@ -2010,9 +2576,62 @@ dependencies = [
 
 [[package]]
 name = "log"
-version = "0.4.20"
+version = "0.4.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+
+[[package]]
+name =
"logos" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c000ca4d908ff18ac99b93a062cb8958d331c3220719c52e77cb19cc6ac5d2c1" +dependencies = [ + "logos-derive", +] + +[[package]] +name = "logos-codegen" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc487311295e0002e452025d6b580b77bb17286de87b57138f3b5db711cded68" +dependencies = [ + "beef", + "fnv", + "proc-macro2", + "quote", + "regex-syntax 0.6.29", + "syn 2.0.68", +] + +[[package]] +name = "logos-derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbfc0d229f1f42d790440136d941afd806bc9e949e2bcb8faa813b0f00d1267e" +dependencies = [ + "logos-codegen", +] + +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "matchit" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "md-5" @@ -2026,9 +2645,32 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "miette" +version = "5.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59bb584eaeeab6bd0226ccf3509a69d7936d148cf3d036ad350abe35e8c6856e" +dependencies = [ + "miette-derive", + "once_cell", + "thiserror", + "unicode-width", +] + +[[package]] +name = "miette-derive" +version = "5.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] [[package]] name = "mime" @@ -2044,9 +2686,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", ] @@ -2063,10 +2705,42 @@ dependencies = [ ] [[package]] -name = "new_debug_unreachable" -version = "1.0.4" +name = "multimap" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" + +[[package]] +name = "nanoid" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" +dependencies = [ + "rand", +] + +[[package]] +name = "native-tls" +version = "0.2.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "new_debug_unreachable" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" [[package]] name = "nom" @@ -2078,15 +2752,39 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", + "serde", ] [[package]] @@ -2106,6 +2804,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", + "serde", +] + [[package]] name = "num-conv" version = "0.1.0" @@ -2123,20 +2831,32 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", "num-traits", ] +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", + "serde", +] + [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -2152,13 +2872,34 @@ dependencies = [ "libc", ] +[[package]] +name = "num_enum" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" +dependencies = [ + "num_enum_derive 0.6.1", +] + [[package]] name = "num_enum" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" dependencies = [ - "num_enum_derive", + "num_enum_derive 0.7.2", +] + +[[package]] +name = "num_enum_derive" +version = "0.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 2.0.68", ] [[package]] @@ -2170,7 +2911,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] @@ -2181,9 +2922,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.32.2" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" dependencies = [ "memchr", ] @@ -2219,12 +2960,178 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "openssl" +version = "0.10.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "opentelemetry" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9591d937bc0e6d2feb6f71a559540ab300ea49955229c347a517a28d27784c54" +dependencies = [ + "opentelemetry_api", + "opentelemetry_sdk", +] + +[[package]] +name = "opentelemetry-http" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7594ec0e11d8e33faf03530a4c49af7064ebba81c1480e01be67d90b356508b" +dependencies = [ + "async-trait", + "bytes", + "http 0.2.12", + "opentelemetry_api", + "reqwest 0.11.27", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e5e5a5c4135864099f3faafbe939eb4d7f9b80ebf68a8448da961b32a7c1275" +dependencies = [ + "async-trait", + "futures-core", + "http 0.2.12", + "opentelemetry-http", + "opentelemetry-proto", + "opentelemetry-semantic-conventions", + "opentelemetry_api", + "opentelemetry_sdk", + "prost 0.11.9", + "reqwest 0.11.27", + "thiserror", + "tokio", + "tonic", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e3f814aa9f8c905d0ee4bde026afd3b2577a97c10e1699912e3e44f0c4cbeb" +dependencies = [ + "opentelemetry_api", + "opentelemetry_sdk", + "prost 0.11.9", + "tonic", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73c9f9340ad135068800e7f1b24e9e09ed9e7143f5bf8518ded3d3ec69789269" +dependencies = [ 
+ "opentelemetry", +] + +[[package]] +name = "opentelemetry_api" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a81f725323db1b1206ca3da8bb19874bbd3f57c3bcd59471bfb04525b265b9b" +dependencies = [ + "futures-channel", + "futures-util", + "indexmap 1.9.3", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", + "urlencoding", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa8e705a0612d48139799fcbaba0d4a90f06277153e43dd2bdc16c6f0edd8026" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "once_cell", + "opentelemetry_api", + "ordered-float 3.9.2", + "percent-encoding", + "rand", + "regex", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", +] + [[package]] name = "option-ext" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "ordered-float" +version = "3.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1e1c390732d15f1d48471625cd92d154e66db2c56645e29a9cd26f4699f72dc" +dependencies = [ + "num-traits", +] + [[package]] name = "os_info" version = "3.8.2" @@ -2236,11 +3143,17 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "parity-scale-codec" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arrayvec", "bitvec", @@ -2252,11 +3165,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 1.0.109", @@ -2264,9 +3177,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", @@ -2274,15 +3187,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.2", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] 
[[package]] @@ -2298,27 +3211,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" - -[[package]] -name = "path-absolutize" -version = "3.1.1" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4af381fe79fa195b4909485d99f73a80792331df0625188e707854f0b3383f5" -dependencies = [ - "path-dedot", -] - -[[package]] -name = "path-dedot" -version = "3.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ba0ad7e047712414213ff67533e6dd477af0a4e1d14fb52343e53d30ea9397" -dependencies = [ - "once_cell", -] +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "path-slash" @@ -2374,12 +3269,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 2.2.6", ] [[package]] @@ -2422,7 +3317,7 @@ dependencies = [ "phf_shared 0.11.2", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] @@ -2445,29 +3340,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -2481,9 +3376,19 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ - "der", - "pkcs8", - "spki", + "der 0.7.9", + "pkcs8 0.10.2", + "spki 0.7.3", +] + +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der 0.6.1", + "spki 0.6.0", ] [[package]] @@ -2492,8 +3397,8 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der", - "spki", + "der 0.7.9", + "spki 0.7.3", ] [[package]] @@ -2528,12 +3433,12 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "prettyplease" -version = "0.2.16" +version = "0.2.20" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" +checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] @@ -2560,15 +3465,6 @@ dependencies = [ "toml_edit 0.19.15", ] -[[package]] -name = "proc-macro-crate" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" -dependencies = [ - "toml_edit 0.20.7", -] - [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -2580,53 +3476,204 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] +[[package]] +name = "prometheus-client" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" +dependencies = [ + "dtoa", + "itoa", + "parking_lot", + "prometheus-client-derive-encode", +] + +[[package]] +name = "prometheus-client-derive-encode" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "proptest" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "lazy_static", "num-traits", "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.2", + "regex-syntax 0.8.4", "unarray", ] [[package]] -name = "quote" -version = "1.0.35" +name = "prost" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ - "proc-macro2", + "bytes", + "prost-derive 0.11.9", ] [[package]] -name = "radium" -version = "0.7.0" +name = "prost" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +dependencies = [ + "bytes", + "prost-derive 0.12.6", +] [[package]] -name = "rand" -version = "0.8.5" +name = "prost-build" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ - "libc", - "rand_chacha", - "rand_core", + "bytes", + "heck 0.5.0", + "itertools 0.12.1", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost 0.12.6", + "prost-types", + "regex", + "syn 2.0.68", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +dependencies = [ + "anyhow", + "itertools 0.10.5", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "prost-derive" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +dependencies = [ + "anyhow", + "itertools 0.12.1", + "proc-macro2", + "quote", + "syn 2.0.68", +] + +[[package]] +name = "prost-reflect" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "057237efdb71cf4b3f9396302a3d6599a92fa94063ba537b66130980ea9909f3" +dependencies = [ + "base64 0.21.7", + "logos", + "miette", + "once_cell", + "prost 0.12.6", + "prost-types", + "serde", + "serde-value", +] + +[[package]] +name = "prost-types" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +dependencies = [ + "prost 0.12.6", +] + +[[package]] +name = "protox" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00bb76c5f6221de491fe2c8f39b106330bbd9762c6511119c07940e10eb9ff11" +dependencies = [ + "bytes", + "miette", + "prost 0.12.6", + "prost-reflect", + "prost-types", + "protox-parse", + "thiserror", +] + +[[package]] +name = "protox-parse" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4581f441c58863525a3e6bec7b8de98188cf75239a56c725a3e7288450a33f" +dependencies = [ + "logos", + "miette", + "prost-types", + "thiserror", +] + +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + +[[package]] +name = "quote" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", ] [[package]] @@ -2659,9 +3706,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.8.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -2686,11 +3733,20 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" +dependencies = [ + "bitflags 2.6.0", +] + [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = 
"bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ "getrandom", "libredox", @@ -2699,78 +3755,145 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" dependencies = [ "aho-corasick", "memchr", - "regex-automata", - "regex-syntax 0.8.2", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.4", ] [[package]] name = "regex-syntax" -version = "0.7.5" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "reqwest" -version = "0.11.24" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-rustls", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.29", + "hyper-rustls 0.24.2", + "hyper-tls 0.5.0", "ipnet", "js-sys", "log", "mime", + "native-tls", "once_cell", "percent-encoding", "pin-project-lite", - "rustls", - "rustls-pemfile", + "rustls 0.21.12", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", "system-configuration", "tokio", - "tokio-rustls", + "tokio-native-tls", + "tokio-rustls 0.24.1", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", "webpki-roots", - "winreg", + "winreg 0.50.0", +] + +[[package]] +name = "reqwest" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +dependencies = [ + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.4.0", + "hyper-rustls 0.27.2", + "hyper-tls 0.6.0", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile 2.1.2", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + 
"system-configuration", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg 0.52.0", +] + +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint 0.4.9", + "hmac", + "zeroize", ] [[package]] @@ -2856,19 +3979,19 @@ dependencies = [ "num-integer", "num-traits", "pkcs1", - "pkcs8", + "pkcs8 0.10.2", "rand_core", - "signature", - "spki", + "signature 2.2.0", + "spki 0.7.3", "subtle", "zeroize", ] [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hex" @@ -2887,11 +4010,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -2906,10 +4029,23 @@ checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.8", - "rustls-webpki", + "rustls-webpki 0.101.7", "sct", ] +[[package]] +name = "rustls" +version = "0.23.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +dependencies = [ + "once_cell", + "rustls-pki-types", + "rustls-webpki 0.102.4", + "subtle", + "zeroize", +] + [[package]] name = "rustls-pemfile" version = "1.0.4" @@ -2919,6 +4055,22 @@ dependencies = [ "base64 0.21.7", ] +[[package]] +name = "rustls-pemfile" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +dependencies = [ + "base64 0.22.1", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" + [[package]] name = "rustls-webpki" version = "0.101.7" @@ -2929,17 +4081,28 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "rustls-webpki" +version = "0.102.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +dependencies = [ + "ring 0.17.8", + "rustls-pki-types", + "untrusted 0.9.0", +] + [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "salsa20" @@ -2961,28 +4124,37 @@ dependencies = [ 
[[package]] name = "scale-info" -version = "2.10.0" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" +checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "cfg-if", - "derive_more", + "derive_more 0.99.18", "parity-scale-codec", "scale-info-derive", ] [[package]] name = "scale-info-derive" -version = "2.10.0" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" +checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 1.3.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 1.0.109", ] +[[package]] +name = "schannel" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -3002,75 +4174,257 @@ dependencies = [ ] [[package]] -name = "sct" -version = "0.7.1" +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring 0.17.8", + "untrusted 0.9.0", +] + +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct 0.1.1", + "der 0.6.1", + "generic-array", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct 0.2.0", + "der 0.7.9", + "generic-array", + "pkcs8 0.10.2", + "subtle", + "zeroize", +] + +[[package]] +name = "secp256k1" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" +dependencies = [ + "secp256k1-sys", +] + +[[package]] +name = "secp256k1-sys" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" +dependencies = [ + "cc", +] + +[[package]] +name = "secrecy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +dependencies = [ + "zeroize", +] + +[[package]] +name = "security-framework" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +dependencies = [ + "serde", +] + +[[package]] +name = "send_wrapper" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" + +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + +[[package]] +name = "sentry" +version = "0.31.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce4b57f1b521f674df7a1d200be8ff5d74e3712020ee25b553146657b5377d5" +dependencies = [ + "httpdate", + "native-tls", + "reqwest 0.11.27", + "sentry-backtrace", + "sentry-contexts", + "sentry-core", + "sentry-debug-images", + "sentry-panic", + "sentry-tracing", + "tokio", + "ureq", +] + +[[package]] +name = "sentry-backtrace" +version = "0.31.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58cc8d4e04a73de8f718dc703943666d03f25d3e9e4d0fb271ca0b8c76dfa00e" +dependencies = [ + "backtrace", + "once_cell", + "regex", + "sentry-core", +] + +[[package]] +name = "sentry-contexts" +version = "0.31.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6436c1bad22cdeb02179ea8ef116ffc217797c028927def303bc593d9320c0d1" +dependencies = [ + "hostname", + "libc", + "os_info", + "rustc_version", + "sentry-core", + "uname", +] + +[[package]] +name = "sentry-core" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +checksum = "901f761681f97db3db836ef9e094acdd8756c40215326c194201941947164ef1" dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", + "once_cell", + "rand", + "sentry-types", + "serde", + "serde_json", ] [[package]] -name = "sec1" -version = "0.7.3" +name = "sentry-debug-images" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +checksum = "afdb263e73d22f39946f6022ed455b7561b22ff5553aca9be3c6a047fa39c328" dependencies = [ - "base16ct", - "der", - "generic-array", - "pkcs8", - "subtle", - "zeroize", + "findshlibs", + "once_cell", + "sentry-core", ] [[package]] -name = "semver" -version = "1.0.22" +name = "sentry-panic" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "74fbf1c163f8b6a9d05912e1b272afa27c652e8b47ea60cb9a57ad5e481eea99" dependencies = [ - "serde", + "sentry-backtrace", + "sentry-core", ] [[package]] -name = "send_wrapper" -version = "0.4.0" +name = "sentry-tracing" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" +checksum = "82eabcab0a047040befd44599a1da73d3adb228ff53b5ed9795ae04535577704" +dependencies = [ + "sentry-backtrace", + "sentry-core", + "tracing-core", + "tracing-subscriber", +] [[package]] -name = "send_wrapper" -version = "0.6.0" +name = "sentry-types" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" +checksum = "da956cca56e0101998c8688bc65ce1a96f00673a0e58e663664023d4c7911e82" +dependencies = [ + "debugid", + "hex", + 
"rand", + "serde", + "serde_json", + "thiserror", + "time", + "url", + "uuid 1.9.1", +] [[package]] name = "serde" -version = "1.0.197" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" dependencies = [ "serde_derive", ] +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float 2.10.1", + "serde", +] + [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" dependencies = [ "itoa", "ryu", @@ -3079,9 +4433,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -3098,13 +4452,35 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_with" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" +dependencies = [ + "serde", + "serde_with_macros", +] + +[[package]] +name = "serde_with_macros" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "serde_yaml" version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap", + "indexmap 2.2.6", "itoa", "ryu", "serde", @@ -3133,6 +4509,17 @@ dependencies = [ "digest", ] +[[package]] +name = "sha2_ce" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca2daa77078f4ddff27e75c4bf59e4c2697525f56dbb3c842d34a5d1f2b04a2" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sha3" version = "0.10.8" @@ -3143,15 +4530,44 @@ dependencies = [ "keccak", ] +[[package]] +name = "sha3_ce" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34c9a08202c50378d8a07a5f458193a5f542d2828ac6640263dbc0c2533ea25e" +dependencies = [ + "digest", + "keccak", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "signal-hook-registry" 
-version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest", + "rand_core", +] + [[package]] name = "signature" version = "2.2.0" @@ -3189,11 +4605,21 @@ dependencies = [ "autocfg", ] +[[package]] +name = "slugify-rs" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c88cdb6ea794da1dde6f267c3a363b2373ce24386b136828d66402a97ebdbff3" +dependencies = [ + "deunicode", + "nanoid", +] + [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smawk" @@ -3203,9 +4629,9 @@ checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -3240,6 +4666,16 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der 0.6.1", +] + [[package]] name = "spki" version = "0.7.3" @@ -3247,16 +4683,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der", + "der 0.7.9", ] [[package]] name = "sqlformat" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" +checksum = "f895e3734318cc55f1fe66258926c9b910c124d47520339efecbb6c59cec7c1f" dependencies = [ - "itertools 0.12.1", "nom", "unicode_categories", ] @@ -3295,7 +4730,7 @@ dependencies = [ "futures-util", "hashlink", "hex", - "indexmap", + "indexmap 2.2.6", "log", "memchr", "once_cell", @@ -3334,7 +4769,7 @@ checksum = "5833ef53aaa16d860e92123292f1f6a3d53c34ba8b1969f152ef1a7bb803f3c8" dependencies = [ "dotenvy", "either", - "heck", + "heck 0.4.1", "hex", "once_cell", "proc-macro2", @@ -3360,7 +4795,7 @@ checksum = "1ed31390216d20e538e447a7a9b959e06ed9fc51c37b514b46eb758016ecd418" dependencies = [ "atoi", "base64 0.21.7", - "bitflags 2.4.2", + "bitflags 2.6.0", "byteorder", "bytes", "crc", @@ -3402,7 +4837,7 @@ checksum = "7c824eb80b894f926f89a0b9da0c7f435d27cdd35b8c655b114e58223918577e" dependencies = [ "atoi", "base64 0.21.7", - "bitflags 2.4.2", + "bitflags 2.6.0", "byteorder", "crc", "dotenvy", @@ -3455,6 +4890,12 @@ dependencies = [ "urlencoding", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -3476,67 +4917,76 @@ dependencies = [ [[package]] name = "stringprep" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" dependencies = [ - "finl_unicode", "unicode-bidi", "unicode-normalization", + "unicode-properties", ] [[package]] name = "strsim" -version = "0.11.0" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "strsim" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" -version = "0.25.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ - "strum_macros 0.25.3", + "strum_macros 0.24.3", ] [[package]] name = "strum" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros 0.26.4", +] [[package]] name = "strum_macros" -version = "0.25.3" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn 2.0.51", + "syn 1.0.109", ] [[package]] name = "strum_macros" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "rustversion", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] name = "subtle" -version = "2.4.1" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "svm-rs" @@ -3548,7 +4998,7 @@ dependencies = [ "fs2", "hex", "once_cell", - "reqwest", + "reqwest 0.11.27", "semver", "serde", "serde_json", @@ -3571,9 +5021,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.51" +version = "2.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab617d94515e94ae53b8406c628598680aa0c9587474ecbe58188f7b345d66c" +checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" dependencies = [ "proc-macro2", "quote", @@ -3586,6 +5036,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "system-configuration" version = "0.5.1" @@ -3659,29 +5115,39 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", ] [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -3700,9 +5166,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -3719,9 +5185,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "c55115c6fbe2d2bef26eb09ad74bde02d8255476fc0c7b515ef09fbb35742d82" dependencies = [ "tinyvec_macros", ] @@ -3734,9 +5200,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.37.0" +version = "1.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" dependencies = [ "backtrace", "bytes", @@ -3751,15 +5217,35 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.51", + "syn 2.0.68", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", ] [[package]] @@ -3768,7 +5254,18 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls", + "rustls 0.21.12", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +dependencies = [ + "rustls 0.23.10", + "rustls-pki-types", "tokio", ] @@ -3791,44 +5288,43 @@ checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", - "rustls", + "rustls 0.21.12", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", "tungstenite", "webpki-roots", ] [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] name = "toml" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" +checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.9", + "toml_edit 0.22.14", ] [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -3839,46 +5335,89 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap", + "indexmap 2.2.6", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.20.7" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap", + "indexmap 2.2.6", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.21.1" +version = "0.22.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" dependencies = [ - "indexmap", + "indexmap 2.2.6", + "serde", + "serde_spanned", "toml_datetime", - "winnow 0.5.40", + "winnow 0.6.13", ] [[package]] -name = "toml_edit" -version = "0.22.9" +name = "tonic" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4" +checksum = 
"3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime", - "winnow 0.6.2", + "async-trait", + "axum", + "base64 0.21.7", + "bytes", + "futures-core", + "futures-util", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.29", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost 0.11.9", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", ] +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + [[package]] name = "tower-service" version = "0.3.2" @@ -3905,7 +5444,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] @@ -3915,6 +5454,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", + "valuable", ] [[package]] @@ -3927,6 +5467,76 @@ dependencies = [ "tracing", ] +[[package]] +name = "tracing-log" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75327c6b667828ddc28f5e3f169036cb793c3f588d83bf0f262a7f062ffed3c8" +dependencies = [ + "once_cell", + "opentelemetry", + "opentelemetry_sdk", + "smallvec", + "tracing", + "tracing-core", + "tracing-log 0.1.4", + "tracing-subscriber", +] + +[[package]] +name = "tracing-serde" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "time", + "tracing", + "tracing-core", + "tracing-log 0.2.0", + "tracing-serde", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -3942,11 +5552,11 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 0.2.12", "httparse", "log", "rand", - "rustls", + "rustls 0.21.12", "sha1", "thiserror", "url", @@ -3966,8 +5576,8 @@ dependencies = [ "clap", "ethers", "serde", - "strum 
0.26.2", - "strum_macros 0.26.2", + "strum 0.26.3", + "strum_macros 0.26.4", "thiserror", ] @@ -3983,6 +5593,15 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "uname" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b72f89f0ca32e4db1c04e2a72f5345d59796d4866a1ee0609084569f73683dc8" +dependencies = [ + "libc", +] + [[package]] name = "unarray" version = "0.1.4" @@ -4016,6 +5635,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-properties" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4259d9d4425d9f0661581b804cb85fe66a4c631cadd8f490d1c13a35d5d9291" + [[package]] name = "unicode-segmentation" version = "1.11.0" @@ -4024,9 +5649,9 @@ checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" [[package]] name = "unicode-xid" @@ -4058,11 +5683,24 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "ureq" +version = "2.9.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d11a831e3c0b56e438a28308e7c810799e3c118417f342d30ecec080105395cd" +dependencies = [ + "base64 0.22.1", + "log", + "native-tls", + "once_cell", + "url", +] + [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -4084,9 +5722,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" @@ -4100,13 +5738,20 @@ dependencies = [ [[package]] name = "uuid" -version = "1.8.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" dependencies = [ "getrandom", + "serde", ] +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "vcpkg" version = "0.2.15" @@ -4119,11 +5764,49 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "vise" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229baafe01d5177b63c6ee1def80d8e39a2365e64caf69ddb05a57594b15647c" +dependencies = [ + "compile-fmt", + "elsa", + "linkme", + "once_cell", + "prometheus-client", + "vise-macros", +] + +[[package]] +name = "vise-exporter" +version = 
"0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23981b18d697026f5430249ab01ba739ef2edc463e400042394331cb2bb63494" +dependencies = [ + "hyper 0.14.29", + "once_cell", + "tokio", + "tracing", + "vise", +] + +[[package]] +name = "vise-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb19c33cd5f04dcf4e767635e058a998edbc2b7fca32ade0a4a1cea0f8e9b34" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -4152,9 +5835,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -4162,24 +5845,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -4189,9 +5872,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4199,28 +5882,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = 
"77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -4238,7 +5921,7 @@ version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" dependencies = [ - "redox_syscall", + "redox_syscall 0.4.1", "wasite", ] @@ -4260,11 +5943,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -4273,6 +5956,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.5", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -4288,7 +5980,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.3", + "windows-targets 0.52.5", ] [[package]] @@ -4308,17 +6000,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.3" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d380ba1dc7187569a8a9e91ed34b8ccfc33123bbacb8c0aed2d1ad7f3ef2dc5f" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.3", - "windows_aarch64_msvc 0.52.3", - "windows_i686_gnu 0.52.3", - "windows_i686_msvc 0.52.3", - "windows_x86_64_gnu 0.52.3", - "windows_x86_64_gnullvm 0.52.3", - "windows_x86_64_msvc 0.52.3", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -4329,9 +6022,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.3" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68e5dcfb9413f53afd9c8f86e56a7b4d86d9a2fa26090ea2dc9e40fba56c6ec6" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -4341,9 +6034,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.3" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dab469ebbc45798319e69eebf92308e541ce46760b49b18c6b3fe5e8965b30f" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -4353,9 +6046,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.3" +version = "0.52.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a4e9b6a7cac734a8b4138a4e1044eac3404d8326b6c0f939276560687a033fb" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -4365,9 +6064,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.3" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b0ec9c422ca95ff34a78755cfa6ad4a51371da2a5ace67500cf7ca5f232c58" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -4377,9 +6076,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.3" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "704131571ba93e89d7cd43482277d6632589b18ecf4468f591fbae0a8b101614" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -4389,9 +6088,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.3" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42079295511643151e98d61c38c0acc444e52dd42ab456f7ccfd5152e8ecf21c" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -4401,9 +6100,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.3" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0770833d60a970638e989b3fa9fd2bb1aaadcf88963d1659fd7d9990196ed2d6" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" @@ -4416,9 +6115,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.2" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a4191c47f15cc3ec71fcb4913cb83d58def65dd3787610213c649283b5ce178" +checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" dependencies = [ "memchr", ] @@ -4433,6 +6132,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "winreg" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + [[package]] name = "ws_stream_wasm" version = "0.7.4" @@ -4484,29 +6193,29 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] @@ -4519,7 +6228,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.68", ] [[package]] @@ -4542,6 +6251,35 @@ dependencies = [ "zstd", ] +[[package]] +name = "zk_evm" +version = "0.133.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9af08e9284686a1b0c89ec4931eb915ac0729367f1247abd06164874fe738106" +dependencies = [ + "anyhow", + "lazy_static", + "num", + "serde", + "serde_json", + "static_assertions", + "zk_evm_abstractions", + "zkevm_opcode_defs", +] + +[[package]] +name = "zk_evm_abstractions" +version = "0.140.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be696258861eba4e6625a5665084b2266720bb67f4ba69819469700ac5c6a401" +dependencies = [ + "anyhow", + "num_enum 0.6.1", + "serde", + "static_assertions", + "zkevm_opcode_defs", +] + [[package]] name = "zk_inception" version = "0.1.0" @@ -4558,14 +6296,16 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "strum 0.26.2", - "strum_macros 0.26.2", + "slugify-rs", + "strum 0.26.3", + "strum_macros 0.26.4", "thiserror", "tokio", "toml", "types", "url", "xshell", + "zksync_config", ] [[package]] @@ -4577,13 +6317,272 @@ dependencies = [ "common", "config", "human-panic", - "strum 0.26.2", - "strum_macros 0.26.2", + "serde", + "strum 0.26.3", + "strum_macros 0.26.4", "tokio", "url", "xshell", ] +[[package]] +name = "zkevm_opcode_defs" +version = "0.132.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" +dependencies = [ + "bitflags 2.6.0", + "blake2", + "ethereum-types", + "k256 0.11.6", + "lazy_static", + "sha2_ce", + "sha3_ce", +] + +[[package]] +name = "zksync_basic_types" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "ethabi", + "hex", + "num_enum 0.7.2", + "serde", + "serde_json", + "serde_with", + "strum 0.24.1", + "thiserror", + "tiny-keccak", + "url", +] + +[[package]] +name = "zksync_concurrency" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50302b77192891256d180ff2551dc0c3bc4144958b49e9a16c50a0dc218958ba" +dependencies = [ + "anyhow", + "once_cell", + "pin-project", + "rand", + "sha3", + "thiserror", + "time", + "tokio", + "tracing", + "tracing-subscriber", + "vise", +] + +[[package]] +name = "zksync_config" +version = "0.1.0" +dependencies = [ + "anyhow", + "rand", + "secrecy", + "serde", + "url", + "zksync_basic_types", + "zksync_concurrency", + "zksync_consensus_utils", + "zksync_crypto_primitives", +] + +[[package]] +name = "zksync_consensus_utils" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24dc6135abeefa80f617eb2903fe43d137d362bf673f0651b4894b17069d1fb1" +dependencies = [ + "anyhow", + "rand", + "thiserror", + "zksync_concurrency", +] + +[[package]] +name = "zksync_contracts" +version = 
"0.1.0" +dependencies = [ + "envy", + "ethabi", + "hex", + "once_cell", + "serde", + "serde_json", + "zksync_utils", +] + +[[package]] +name = "zksync_crypto_primitives" +version = "0.1.0" +dependencies = [ + "anyhow", + "blake2", + "hex", + "rand", + "secp256k1", + "serde", + "serde_json", + "sha2", + "thiserror", + "zksync_basic_types", + "zksync_utils", +] + +[[package]] +name = "zksync_mini_merkle_tree" +version = "0.1.0" +dependencies = [ + "once_cell", + "zksync_basic_types", + "zksync_crypto_primitives", +] + +[[package]] +name = "zksync_protobuf" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e7c7820f290db565a1b4ff73aa1175cd7d31498fca8d859eb5aceebd33468c" +dependencies = [ + "anyhow", + "bit-vec", + "once_cell", + "prost 0.12.6", + "prost-reflect", + "quick-protobuf", + "rand", + "serde", + "serde_json", + "serde_yaml", + "zksync_concurrency", + "zksync_consensus_utils", + "zksync_protobuf_build", +] + +[[package]] +name = "zksync_protobuf_build" +version = "0.1.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6cafeec1150ae91f1a37c8f0dce6b71b92b93e0c4153d32b4c37e2fd71bce2f" +dependencies = [ + "anyhow", + "heck 0.5.0", + "prettyplease", + "proc-macro2", + "prost-build", + "prost-reflect", + "protox", + "quote", + "syn 2.0.68", +] + +[[package]] +name = "zksync_protobuf_config" +version = "0.1.0" +dependencies = [ + "anyhow", + "hex", + "prost 0.12.6", + "rand", + "secrecy", + "serde_json", + "serde_yaml", + "zksync_basic_types", + "zksync_config", + "zksync_protobuf", + "zksync_protobuf_build", + "zksync_types", +] + +[[package]] +name = "zksync_system_constants" +version = "0.1.0" +dependencies = [ + "once_cell", + "zksync_basic_types", + "zksync_utils", +] + +[[package]] +name = "zksync_types" +version = "0.1.0" +dependencies = [ + "anyhow", + "bigdecimal", + "blake2", + "chrono", + "derive_more 1.0.0-beta.6", + "hex", + "itertools 0.10.5", + "num", + "num_enum 0.7.2", + "once_cell", + "prost 0.12.6", + "rlp", + "secp256k1", + "serde", + "serde_json", + "strum 0.24.1", + "thiserror", + "tracing", + "zksync_basic_types", + "zksync_config", + "zksync_contracts", + "zksync_crypto_primitives", + "zksync_mini_merkle_tree", + "zksync_protobuf", + "zksync_protobuf_build", + "zksync_system_constants", + "zksync_utils", +] + +[[package]] +name = "zksync_utils" +version = "0.1.0" +dependencies = [ + "anyhow", + "bigdecimal", + "futures", + "hex", + "itertools 0.10.5", + "num", + "once_cell", + "reqwest 0.12.5", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "zk_evm", + "zksync_basic_types", + "zksync_vlog", +] + +[[package]] +name = "zksync_vlog" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "opentelemetry", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "sentry", + "serde", + "serde_json", + "tokio", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", + "vise", + "vise-exporter", +] + [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" @@ -4605,9 +6604,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.11+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "75652c55c0b6f3e6f12eb786fe1bc960396bf05a1eb3bf1f3691c3610ac2e6d4" dependencies = [ "cc", "pkg-config", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 15e1ddc4cdcb..138a8e3af129 100644 --- 
a/zk_toolbox/Cargo.toml
+++ b/zk_toolbox/Cargo.toml
@@ -25,10 +25,13 @@ keywords = ["zk", "cryptography", "blockchain", "ZKStack", "ZKsync"]
 common = { path = "crates/common" }
 config = { path = "crates/config" }
 types = { path = "crates/types" }
+zksync_config = { path = "../core/lib/config" }
+zksync_protobuf_config = { path = "../core/lib/protobuf_config" }
 
 # External dependencies
 anyhow = "1.0.82"
 clap = { version = "4.4", features = ["derive", "wrap_help"] }
+slugify-rs = "0.0.3"
 cliclack = "0.2.5"
 console = "0.15.8"
 ethers = "2.0"
@@ -36,13 +39,12 @@ futures = "0.3.30"
 human-panic = "2.0"
 lazy_static = "1.4.0"
 once_cell = "1.19.0"
-path-absolutize = "3.1.1"
 rand = "0.8.5"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 serde_yaml = "0.9"
 sqlx = { version = "0.7.4", features = ["runtime-tokio", "migrate", "postgres"] }
-strum = "0.26.2"
+strum = { version = "0.26.2", features = ["derive"] }
 strum_macros = "0.26.2"
 thiserror = "1.0.57"
 tokio = { version = "1.37", features = ["full"] }
diff --git a/zk_toolbox/crates/common/Cargo.toml b/zk_toolbox/crates/common/Cargo.toml
index 00c3b7775112..6b362905160f 100644
--- a/zk_toolbox/crates/common/Cargo.toml
+++ b/zk_toolbox/crates/common/Cargo.toml
@@ -27,3 +27,4 @@ tokio.workspace = true
 toml.workspace = true
 url.workspace = true
 xshell.workspace = true
+thiserror = "1.0.57"
diff --git a/zk_toolbox/crates/common/src/cmd.rs b/zk_toolbox/crates/common/src/cmd.rs
index e39f1e18972c..ca0f285882a3 100644
--- a/zk_toolbox/crates/common/src/cmd.rs
+++ b/zk_toolbox/crates/common/src/cmd.rs
@@ -1,6 +1,11 @@
-use std::process::Output;
+use std::{
+    ffi::OsStr,
+    fmt::{Display, Formatter},
+    io,
+    process::{Command, Output, Stdio},
+    string::FromUtf8Error,
+};
 
-use anyhow::bail;
 use console::style;
 
 use crate::{
@@ -16,6 +21,51 @@ pub struct Cmd<'a> {
     force_run: bool,
 }
 
+#[derive(thiserror::Error, Debug)]
+pub struct CmdError {
+    pub stderr: Option<String>,
+    pub source: anyhow::Error,
+}
+
+impl Display for CmdError {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        let mut data = format!("{}", &self.source);
+        if let Some(stderr) = &self.stderr {
+            data = format!("{data}\n{stderr}");
+        }
+        write!(f, "{}", data)
+    }
+}
+
+impl From<xshell::Error> for CmdError {
+    fn from(value: xshell::Error) -> Self {
+        Self {
+            stderr: None,
+            source: value.into(),
+        }
+    }
+}
+
+impl From<io::Error> for CmdError {
+    fn from(value: io::Error) -> Self {
+        Self {
+            stderr: None,
+            source: value.into(),
+        }
+    }
+}
+
+impl From<FromUtf8Error> for CmdError {
+    fn from(value: FromUtf8Error) -> Self {
+        Self {
+            stderr: None,
+            source: value.into(),
+        }
+    }
+}
+
+pub type CmdResult<T> = Result<T, CmdError>;
+
 impl<'a> Cmd<'a> {
     /// Create a new `Cmd` instance.
     pub fn new(cmd: xshell::Cmd<'a>) -> Self {
@@ -31,32 +81,43 @@ impl<'a> Cmd<'a> {
         self
     }
 
+    /// Set env variables for the command.
+    pub fn env<K: AsRef<OsStr>, V: AsRef<OsStr>>(mut self, key: K, value: V) -> Self {
+        self.inner = self.inner.env(key, value);
+        self
+    }
+
     /// Run the command without capturing its output.
-    pub fn run(&mut self) -> anyhow::Result<()> {
-        if global_config().verbose || self.force_run {
+    pub fn run(mut self) -> CmdResult<()> {
+        let command_txt = self.inner.to_string();
+        let output = if global_config().verbose || self.force_run {
             logger::debug(format!("Running: {}", self.inner));
             logger::new_empty_line();
-            self.inner.run()?;
-            logger::new_empty_line();
-            logger::new_line();
+            let output = run_low_level_process_command(self.inner.into())?;
+            if let Ok(data) = String::from_utf8(output.stderr.clone()) {
+                if !data.is_empty() {
+                    logger::info(data)
+                }
+            }
+            output
         } else {
             // Command will be logged manually.
             self.inner.set_quiet(true);
             // Error will be handled manually.
             self.inner.set_ignore_status(true);
-            let output = self.inner.output()?;
-            self.check_output_status(&output)?;
-        }
+            self.inner.output()?
+        };
+        check_output_status(&command_txt, &output)?;
         if global_config().verbose {
-            logger::debug(format!("Command completed: {}", self.inner));
+            logger::debug(format!("Command completed: {}", command_txt));
         }
         Ok(())
     }
 
     /// Run the command and return its output.
-    pub fn run_with_output(&mut self) -> anyhow::Result<Output> {
+    pub fn run_with_output(&mut self) -> CmdResult<Output> {
         if global_config().verbose || self.force_run {
             logger::debug(format!("Running: {}", self.inner));
             logger::new_empty_line();
@@ -73,28 +134,53 @@ impl<'a> Cmd<'a> {
         Ok(output)
     }
+}
 
-    fn check_output_status(&self, output: &std::process::Output) -> anyhow::Result<()> {
-        if !output.status.success() {
-            logger::new_line();
-            logger::error_note(
-                &format!("Command failed to run: {}", self.inner),
-                &log_output(output),
-            );
-            bail!("Command failed to run: {}", self.inner);
-        }
-
-        Ok(())
+fn check_output_status(command_text: &str, output: &std::process::Output) -> CmdResult<()> {
+    if !output.status.success() {
+        logger::new_line();
+        logger::error_note(
+            &format!("Command failed to run: {}", command_text),
+            &log_output(output),
+        );
+        return Err(CmdError {
+            stderr: Some(String::from_utf8(output.stderr.clone())?),
+            source: anyhow::anyhow!("Command failed to run: {}", command_text),
+        });
     }
+
+    Ok(())
+}
+
+fn run_low_level_process_command(mut command: Command) -> io::Result<Output> {
+    command.stdout(Stdio::inherit());
+    command.stderr(Stdio::piped());
+    let child = command.spawn()?;
+    child.wait_with_output()
 }
 
 fn log_output(output: &std::process::Output) -> String {
     let (status, stdout, stderr) = get_indented_output(output, 4, 120);
+    log_output_int(status, Some(stdout), Some(stderr))
+}
+
+fn log_output_int(status: String, stdout: Option<String>, stderr: Option<String>) -> String {
     let status_header = style("  Status:").bold();
-    let stdout_header = style("  Stdout:").bold();
-    let stderr_header = style("  Stderr:").bold();
+    let stdout = if let Some(stdout) = stdout {
+        let stdout_header = style("  Stdout:").bold();
+        format!("{stdout_header}\n{stdout}\n")
+    } else {
+        String::new()
+    };
+
+    let stderr = if let Some(stderr) = stderr {
+        let stderr_header = style("  Stderr:").bold();
+        format!("{stderr_header}\n{stderr}\n")
+    } else {
+        String::new()
+    };
 
-    format!("{status_header}\n{status}\n{stdout_header}\n{stdout}\n{stderr_header}\n{stderr}")
+    format!("{status_header}\n{status}\n{stdout}\n{stderr}")
 }
 
 // Indent output and wrap text.
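To make the payoff of the new error type concrete, here is a minimal sketch (assumed usage, not part of this diff) of a caller branching on the stderr captured in `CmdError`; the `cargo build` invocation and the matched message are placeholders. The `ForgeErrorHandler` trait added to `forge.rs` below applies the same stderr-matching idea.

    use common::cmd::{Cmd, CmdResult};
    use xshell::{cmd, Shell};

    fn build_once_with_retry(shell: &Shell) -> anyhow::Result<()> {
        // `run()` now returns `CmdResult<()>`; on failure the error carries the captured stderr.
        let res: CmdResult<()> = Cmd::new(cmd!(shell, "cargo build")).run();
        if let Err(err) = &res {
            // Retry once if stderr reports a known transient condition (placeholder text).
            if err.stderr.as_deref().is_some_and(|s| s.contains("could not acquire lock")) {
                return Ok(Cmd::new(cmd!(shell, "cargo build")).run()?);
            }
        }
        // `CmdError` converts into `anyhow::Error`, so `?` keeps working for callers.
        Ok(res?)
    }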
diff --git a/zk_toolbox/crates/common/src/docker.rs b/zk_toolbox/crates/common/src/docker.rs
index db8a63e9f5d0..f01a7955aead 100644
--- a/zk_toolbox/crates/common/src/docker.rs
+++ b/zk_toolbox/crates/common/src/docker.rs
@@ -3,8 +3,9 @@ use xshell::{cmd, Shell};
 use crate::cmd::Cmd;
 
 pub fn up(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> {
-    Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} up -d")).run()
+    Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} up -d")).run()?)
 }
+
 pub fn down(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> {
-    Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} down")).run()
+    Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} down")).run()?)
 }
diff --git a/zk_toolbox/crates/common/src/forge.rs b/zk_toolbox/crates/common/src/forge.rs
index 565c7aa52d96..de91c0e72500 100644
--- a/zk_toolbox/crates/common/src/forge.rs
+++ b/zk_toolbox/crates/common/src/forge.rs
@@ -5,16 +5,20 @@ use std::{
 
 use clap::{Parser, ValueEnum};
 use ethers::{
+    core::types::Bytes,
     middleware::Middleware,
     prelude::{LocalWallet, Signer},
     types::{Address, H256, U256},
-    utils::hex::ToHex,
+    utils::{hex, hex::ToHex},
 };
 use serde::{Deserialize, Serialize};
 use strum_macros::Display;
 use xshell::{cmd, Shell};
 
-use crate::{cmd::Cmd, ethereum::create_ethers_client};
+use crate::{
+    cmd::{Cmd, CmdResult},
+    ethereum::create_ethers_client,
+};
 
 /// Forge is a wrapper around the forge binary.
 pub struct Forge {
@@ -54,9 +58,24 @@ impl ForgeScript {
     pub fn run(mut self, shell: &Shell) -> anyhow::Result<()> {
         let _dir_guard = shell.push_dir(&self.base_path);
         let script_path = self.script_path.as_os_str();
-        let args = self.args.build();
-        Cmd::new(cmd!(shell, "forge script {script_path} --legacy {args...}")).run()?;
-        Ok(())
+        let args_no_resume = self.args.build();
+        if self.args.resume {
+            let mut args = args_no_resume.clone();
+            args.push(ForgeScriptArg::Resume.to_string());
+            let res = Cmd::new(cmd!(shell, "forge script {script_path} --legacy {args...}")).run();
+            if !res.resume_not_successful_because_has_not_began() {
+                return Ok(res?);
+            }
+        }
+        let res = Cmd::new(cmd!(
+            shell,
+            "forge script {script_path} --legacy {args_no_resume...}"
+        ))
+        .run();
+        if res.proposal_error() {
+            return Ok(());
+        }
+        Ok(res?)
     }
 
     pub fn wallet_args_passed(&self) -> bool {
@@ -88,6 +107,13 @@ impl ForgeScript {
         self
     }
 
+    pub fn with_calldata(mut self, calldata: &Bytes) -> Self {
+        self.args.add_arg(ForgeScriptArg::Sig {
+            sig: hex::encode(calldata),
+        });
+        self
+    }
+
     /// Makes sure a transaction is sent, only after its previous one has been confirmed and succeeded.
     pub fn with_slow(mut self) -> Self {
         self.args.add_arg(ForgeScriptArg::Slow);
@@ -209,6 +235,7 @@ pub enum ForgeScriptArg {
         url: String,
     },
     Verify,
+    Resume,
 }
 
 /// ForgeScriptArgs is a set of arguments that can be passed to the forge script command.
@@ -230,6 +257,8 @@ pub struct ForgeScriptArgs {
     /// Verifier API key
     #[clap(long)]
     pub verifier_api_key: Option<String>,
+    #[clap(long)]
+    pub resume: bool,
     /// List of additional arguments that can be passed through the CLI.
     ///
     /// e.g.: `zk_inception init -a --private-key=<PRIVATE_KEY>`
@@ -349,3 +378,35 @@ pub enum ForgeVerifier {
     Blockscout,
     Oklink,
 }
+
+// Trait for handling forge errors. Required so helper methods can be implemented on `CmdResult`.
+trait ForgeErrorHandler {
+    // Resume doesn't work if the forge script has never been started on this chain before.
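+    // A failed `forge script --resume` run surfaces this on stderr as "Deployment not found for chain",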
+    // so we catch it and retry without the resume arg in that case.
+    fn resume_not_successful_because_has_not_began(&self) -> bool;
+    // Catch the error if the upgrade tx has already been processed. Many txs are executed
+    // through the upgrade mechanism, so if this particular upgrade has already been processed
+    // we can treat it as a success.
+    fn proposal_error(&self) -> bool;
+}
+
+impl ForgeErrorHandler for CmdResult<()> {
+    fn resume_not_successful_because_has_not_began(&self) -> bool {
+        let text = "Deployment not found for chain";
+        check_error(self, text)
+    }
+
+    fn proposal_error(&self) -> bool {
+        let text = "revert: Operation with this proposal id already exists";
+        check_error(self, text)
+    }
+}
+
+fn check_error(cmd_result: &CmdResult<()>, error_text: &str) -> bool {
+    if let Err(cmd_error) = &cmd_result {
+        if let Some(stderr) = &cmd_error.stderr {
+            return stderr.contains(error_text);
+        }
+    }
+    false
+}
diff --git a/zk_toolbox/crates/common/src/git.rs b/zk_toolbox/crates/common/src/git.rs
new file mode 100644
index 000000000000..7ebedf0f6283
--- /dev/null
+++ b/zk_toolbox/crates/common/src/git.rs
@@ -0,0 +1,31 @@
+use std::path::PathBuf;
+
+use xshell::{cmd, Shell};
+
+use crate::cmd::Cmd;
+
+pub fn clone(
+    shell: &Shell,
+    path: PathBuf,
+    repository: &str,
+    name: &str,
+) -> anyhow::Result<PathBuf> {
+    let _dir = shell.push_dir(path);
+    Cmd::new(cmd!(
+        shell,
+        "git clone --recurse-submodules {repository} {name}"
+    ))
+    .run()?;
+    Ok(shell.current_dir().join(name))
+}
+
+pub fn submodule_update(shell: &Shell, link_to_code: PathBuf) -> anyhow::Result<()> {
+    let _dir_guard = shell.push_dir(link_to_code);
+    Cmd::new(cmd!(
+        shell,
+        "git submodule update --init --recursive
+"
+    ))
+    .run()?;
+    Ok(())
+}
diff --git a/zk_toolbox/crates/common/src/lib.rs b/zk_toolbox/crates/common/src/lib.rs
index 349cd751c5f6..2ab5c5f10e13 100644
--- a/zk_toolbox/crates/common/src/lib.rs
+++ b/zk_toolbox/crates/common/src/lib.rs
@@ -1,3 +1,7 @@
+mod prerequisites;
+mod prompt;
+mod term;
+
 pub mod cmd;
 pub mod config;
 pub mod db;
@@ -5,13 +9,10 @@ pub mod docker;
 pub mod ethereum;
 pub mod files;
 pub mod forge;
-mod prerequisites;
-mod prompt;
-mod slugify;
-mod term;
+pub mod git;
+pub mod server;
 pub mod wallets;
 
-pub use prerequisites::check_prerequisites;
+pub use prerequisites::{check_general_prerequisites, check_prover_prequisites};
 pub use prompt::{init_prompt_theme, Prompt, PromptConfirm, PromptSelect};
-pub use slugify::slugify;
-pub use term::{logger, spinner};
+pub use term::{error, logger, spinner};
diff --git a/zk_toolbox/crates/common/src/prerequisites.rs b/zk_toolbox/crates/common/src/prerequisites.rs
index ae21ba68b3c1..6c437302470d 100644
--- a/zk_toolbox/crates/common/src/prerequisites.rs
+++ b/zk_toolbox/crates/common/src/prerequisites.rs
@@ -30,21 +30,52 @@ const DOCKER_COMPOSE_PREREQUISITE: Prerequisite = Prerequisite {
     download_link: "https://docs.docker.com/compose/install/",
 };
 
+const PROVER_PREREQUISITES: [Prerequisite; 5] = [
+    Prerequisite {
+        name: "gcloud",
+        download_link: "https://cloud.google.com/sdk/docs/install",
+    },
+    Prerequisite {
+        name: "wget",
+        download_link: "https://www.gnu.org/software/wget/",
+    },
+    Prerequisite {
+        name: "cmake",
+        download_link: "https://cmake.org/download/",
+    },
+    Prerequisite {
+        name: "nvcc",
+        download_link: "https://developer.nvidia.com/cuda-downloads",
+    }, // CUDA toolkit
+    Prerequisite {
+        name: "nvidia-smi",
+        download_link: "https://developer.nvidia.com/cuda-downloads",
+    }, // CUDA GPU driver
+];
+
 struct Prerequisite {
    name: &'static str,
     download_link: &'static str,
 }
 
-pub fn check_prerequisites(shell: &Shell) {
+pub fn check_general_prerequisites(shell: &Shell) {
+    check_prerequisites(shell, &PREREQUISITES, true);
+}
+
+pub fn check_prover_prequisites(shell: &Shell) {
+    check_prerequisites(shell, &PROVER_PREREQUISITES, false);
+}
+
+fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_compose: bool) {
     let mut missing_prerequisites = vec![];
 
-    for prerequisite in &PREREQUISITES {
+    for prerequisite in prerequisites {
         if !check_prerequisite(shell, prerequisite.name) {
             missing_prerequisites.push(prerequisite);
         }
     }
 
-    if !check_docker_compose_prerequisite(shell) {
+    if check_compose && !check_docker_compose_prerequisite(shell) {
         missing_prerequisites.push(&DOCKER_COMPOSE_PREREQUISITE);
     }
 
diff --git a/zk_toolbox/crates/common/src/server.rs b/zk_toolbox/crates/common/src/server.rs
new file mode 100644
index 000000000000..c65c8d4c13e2
--- /dev/null
+++ b/zk_toolbox/crates/common/src/server.rs
@@ -0,0 +1,97 @@
+use std::{ffi::OsStr, path::PathBuf};
+
+use xshell::{cmd, Shell};
+
+use crate::cmd::Cmd;
+
+/// Allows performing server operations.
+#[derive(Debug)]
+pub struct Server {
+    components: Option<Vec<String>>,
+    code_path: PathBuf,
+}
+
+/// Possible server modes.
+#[derive(Debug)]
+pub enum ServerMode {
+    Normal,
+    Genesis,
+}
+
+impl Server {
+    /// Creates a new instance of the server.
+    pub fn new(components: Option<Vec<String>>, code_path: PathBuf) -> Self {
+        Self {
+            components,
+            code_path,
+        }
+    }
+
+    /// Runs the server.
+    #[allow(clippy::too_many_arguments)]
+    pub fn run<P>(
+        &self,
+        shell: &Shell,
+        server_mode: ServerMode,
+        genesis_path: P,
+        wallets_path: P,
+        general_path: P,
+        secrets_path: P,
+        contracts_path: P,
+        mut additional_args: Vec<String>,
+    ) -> anyhow::Result<()>
+    where
+        P: AsRef<OsStr>,
+    {
+        let _dir_guard = shell.push_dir(&self.code_path);
+
+        if let Some(components) = self.components() {
+            additional_args.push(format!("--components={}", components))
+        }
+        if let ServerMode::Genesis = server_mode {
+            additional_args.push("--genesis".to_string());
+        }
+
+        let mut cmd = Cmd::new(
+            cmd!(
+                shell,
+                "cargo run --release --bin zksync_server --
+                --genesis-path {genesis_path}
+                --wallets-path {wallets_path}
+                --config-path {general_path}
+                --secrets-path {secrets_path}
+                --contracts-config-path {contracts_path}
+                "
+            )
+            .args(additional_args)
+            .env_remove("RUSTUP_TOOLCHAIN"),
+        );
+
+        // If we are running the server in normal mode,
+        // we need to stream its output to the console.
+        if let ServerMode::Normal = server_mode {
+            cmd = cmd.with_force_run();
+        }
+
+        cmd.run()?;
+
+        Ok(())
+    }
+
+    /// Builds the server.
+    pub fn build(&self, shell: &Shell) -> anyhow::Result<()> {
+        let _dir_guard = shell.push_dir(&self.code_path);
+        Cmd::new(cmd!(shell, "cargo build --release --bin zksync_server")).run()?;
+        Ok(())
+    }
+
+    /// Returns the components as a comma-separated string.
+    fn components(&self) -> Option<String> {
+        self.components.as_ref().and_then(|components| {
+            if components.is_empty() {
+                return None;
+            }
+            Some(components.join(","))
+        })
+    }
+}
diff --git a/zk_toolbox/crates/common/src/slugify.rs b/zk_toolbox/crates/common/src/slugify.rs
deleted file mode 100644
index 5e9940efb8e2..000000000000
--- a/zk_toolbox/crates/common/src/slugify.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-pub fn slugify(data: &str) -> String {
-    data.trim().replace(' ', "-")
-}
diff --git a/zk_toolbox/crates/common/src/term/error.rs b/zk_toolbox/crates/common/src/term/error.rs
new file mode 100644
index 000000000000..462b4c4f8bb2
--- /dev/null
+++ b/zk_toolbox/crates/common/src/term/error.rs
@@ -0,0 +1,20 @@
+use crate::logger;
+
+pub fn log_error(error: anyhow::Error) {
+    logger::error(error.to_string());
+
+    if error.chain().count() > 1 {
+        logger::warn(
+            // "Caused by:",
+            error
+                .chain()
+                .skip(1)
+                .enumerate()
+                .map(|(i, cause)| format!("  {i}: {}", cause))
+                .collect::<Vec<_>>()
+                .join("\n"),
+        );
+    }
+
+    logger::outro("Failed to run command");
+}
diff --git a/zk_toolbox/crates/common/src/term/mod.rs b/zk_toolbox/crates/common/src/term/mod.rs
index a82083530671..9c4bbfca2486 100644
--- a/zk_toolbox/crates/common/src/term/mod.rs
+++ b/zk_toolbox/crates/common/src/term/mod.rs
@@ -1,2 +1,3 @@
+pub mod error;
 pub mod logger;
 pub mod spinner;
diff --git a/zk_toolbox/crates/common/src/term/spinner.rs b/zk_toolbox/crates/common/src/term/spinner.rs
index dcfaaf44d44d..b97ba075ac45 100644
--- a/zk_toolbox/crates/common/src/term/spinner.rs
+++ b/zk_toolbox/crates/common/src/term/spinner.rs
@@ -43,4 +43,9 @@ impl Spinner {
             self.time.elapsed().as_secs_f64()
         ));
     }
+
+    /// Freeze the spinner with the current message.
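+    /// The progress bar stops, but its final message stays visible in the terminal.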
+    pub fn freeze(self) {
+        self.pb.stop(self.msg);
+    }
 }
diff --git a/zk_toolbox/crates/config/Cargo.toml b/zk_toolbox/crates/config/Cargo.toml
index a1fb10760b45..32cce24b3152 100644
--- a/zk_toolbox/crates/config/Cargo.toml
+++ b/zk_toolbox/crates/config/Cargo.toml
@@ -15,7 +15,6 @@ anyhow.workspace = true
 clap.workspace = true
 common.workspace = true
 ethers.workspace = true
-path-absolutize.workspace = true
 rand.workspace = true
 serde.workspace = true
 serde_json.workspace = true
@@ -25,3 +24,5 @@ thiserror.workspace = true
 types.workspace = true
 url.workspace = true
 xshell.workspace = true
+zksync_config.workspace = true
+zksync_protobuf_config.workspace = true
diff --git a/zk_toolbox/crates/config/src/chain.rs b/zk_toolbox/crates/config/src/chain.rs
index e685b0966b44..01dc1cae6434 100644
--- a/zk_toolbox/crates/config/src/chain.rs
+++ b/zk_toolbox/crates/config/src/chain.rs
@@ -8,14 +8,17 @@ use types::{
     BaseToken, ChainId, L1BatchCommitDataGeneratorMode, L1Network, ProverMode, WalletCreation,
 };
 use xshell::Shell;
+use zksync_config::configs::GeneralConfig as ZkSyncGeneralConfig;
+use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr};
 
 use crate::{
     consts::{
-        CONFIG_NAME, CONTRACTS_FILE, GENESIS_FILE, L1_CONTRACTS_FOUNDRY, SECRETS_FILE, WALLETS_FILE,
+        CONFIG_NAME, CONTRACTS_FILE, GENERAL_FILE, GENESIS_FILE, L1_CONTRACTS_FOUNDRY,
+        SECRETS_FILE, WALLETS_FILE,
     },
     create_localhost_wallets,
     traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, SaveConfigWithBasePath},
-    ContractsConfig, GenesisConfig, SecretsConfig, WalletsConfig,
+    ContractsConfig, GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig,
 };
 
 /// Chain configuration file. This file is created in the chain
@@ -30,6 +33,7 @@ pub struct ChainConfigInternal {
     pub prover_version: ProverMode,
     pub configs: PathBuf,
     pub rocks_db_path: PathBuf,
+    pub external_node_config_path: Option<PathBuf>,
     pub l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode,
     pub base_token: BaseToken,
     pub wallet_creation: WalletCreation,
@@ -47,6 +51,7 @@ pub struct ChainConfig {
     pub link_to_code: PathBuf,
     pub rocks_db_path: PathBuf,
     pub configs: PathBuf,
+    pub external_node_config_path: Option<PathBuf>,
     pub l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode,
     pub base_token: BaseToken,
     pub wallet_creation: WalletCreation,
@@ -71,6 +76,10 @@ impl ChainConfig {
         GenesisConfig::read(self.get_shell(), self.configs.join(GENESIS_FILE))
     }
 
+    pub fn get_general_config(&self) -> anyhow::Result<GeneralConfig> {
+        GeneralConfig::read(self.get_shell(), self.configs.join(GENERAL_FILE))
+    }
+
     pub fn get_wallets_config(&self) -> anyhow::Result<WalletsConfig> {
         let path = self.configs.join(WALLETS_FILE);
         if let Ok(wallets) = WalletsConfig::read(self.get_shell(), &path) {
@@ -91,6 +100,33 @@ impl ChainConfig {
         SecretsConfig::read(self.get_shell(), self.configs.join(SECRETS_FILE))
     }
 
+    pub fn path_to_general_config(&self) -> PathBuf {
+        self.configs.join(GENERAL_FILE)
+    }
+
+    pub fn path_to_secrets_config(&self) -> PathBuf {
+        self.configs.join(SECRETS_FILE)
+    }
+
+    pub fn get_zksync_general_config(&self) -> anyhow::Result<ZkSyncGeneralConfig> {
+        decode_yaml_repr::<zksync_protobuf_config::proto::general::GeneralConfig>(
+            &self.configs.join(GENERAL_FILE),
+            false,
+        )
+    }
+
+    pub fn save_zksync_general_config(
+        &self,
+        general_config: &ZkSyncGeneralConfig,
+    ) -> anyhow::Result<()> {
+        let path = self.configs.join(GENERAL_FILE);
+        let bytes = encode_yaml_repr::<zksync_protobuf_config::proto::general::GeneralConfig>(
+            general_config,
+        )?;
+        self.get_shell().write_file(path, bytes)?;
+        Ok(())
+    }
+
     pub fn path_to_foundry(&self) -> PathBuf {
         self.link_to_code.join(L1_CONTRACTS_FOUNDRY)
     }
@@ -100,7 +136,7 @@ impl ChainConfig {
         config.save(shell, path)
     }
 
-    pub fn save_with_base_path(&self, shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<()> {
+    pub fn save_with_base_path(self, shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<()> {
         let config = self.get_internal();
         config.save_with_base_path(shell, path)
     }
@@ -113,6 +149,7 @@ impl ChainConfig {
             prover_version: self.prover_version,
             configs: self.configs.clone(),
             rocks_db_path: self.rocks_db_path.clone(),
+            external_node_config_path: self.external_node_config_path.clone(),
             l1_batch_commit_data_generator_mode: self.l1_batch_commit_data_generator_mode,
             base_token: self.base_token.clone(),
             wallet_creation: self.wallet_creation,
diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs
index 9141d044af94..a00274fb35f3 100644
--- a/zk_toolbox/crates/config/src/consts.rs
+++ b/zk_toolbox/crates/config/src/consts.rs
@@ -11,6 +11,8 @@ pub(crate) const GENERAL_FILE: &str = "general.yaml";
 /// Name of the genesis config file
 pub(crate) const GENESIS_FILE: &str = "genesis.yaml";
 
+// Name of external node specific config
+pub(crate) const EN_CONFIG_FILE: &str = "external_node.yaml";
 pub(crate) const ERC20_CONFIGS_FILE: &str = "erc20.yaml";
 /// Name of the initial deployments config file
 pub(crate) const INITIAL_DEPLOYMENT_FILE: &str = "initial_deployments.yaml";
diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zk_toolbox/crates/config/src/contracts.rs
index b86b9b0f2958..a4c00a10a455 100644
--- a/zk_toolbox/crates/config/src/contracts.rs
+++ b/zk_toolbox/crates/config/src/contracts.rs
@@ -3,7 +3,11 @@ use serde::{Deserialize, Serialize};
 
 use crate::{
     consts::CONTRACTS_FILE,
-    forge_interface::deploy_ecosystem::output::DeployL1Output,
+    forge_interface::{
+        deploy_ecosystem::output::DeployL1Output,
+        initialize_bridges::output::InitializeBridgeOutput,
+        register_chain::output::RegisterChainOutput,
+    },
     traits::{FileConfig, FileConfigWithDefaultName},
 };
 
@@ -64,6 +68,21 @@ impl ContractsConfig {
             .diamond_cut_data
             .clone_from(&deploy_l1_output.contracts_config.diamond_cut_data);
     }
+
+    pub fn set_chain_contracts(&mut self, register_chain_output: &RegisterChainOutput) {
+        self.l1.diamond_proxy_addr = register_chain_output.diamond_proxy_addr;
+        self.l1.governance_addr = register_chain_output.governance_addr;
+        self.l1.chain_admin_addr = register_chain_output.chain_admin_addr;
+    }
+
+    pub fn set_l2_shared_bridge(
+        &mut self,
+        initialize_bridges_output: &InitializeBridgeOutput,
+    ) -> anyhow::Result<()> {
+        self.bridges.shared.l2_address = Some(initialize_bridges_output.l2_shared_bridge_proxy);
+        self.bridges.erc20.l2_address = Some(initialize_bridges_output.l2_shared_bridge_proxy);
+        Ok(())
+    }
 }
 
 impl FileConfigWithDefaultName for ContractsConfig {
@@ -99,6 +118,8 @@ pub struct L1Contracts {
     pub default_upgrade_addr: Address,
     pub diamond_proxy_addr: Address,
     pub governance_addr: Address,
+    #[serde(default)]
+    pub chain_admin_addr: Address,
     pub multicall3_addr: Address,
     pub verifier_addr: Address,
     pub validator_timelock_addr: Address,
diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zk_toolbox/crates/config/src/ecosystem.rs
index 1557ab21646f..a0121a2b25db 100644
--- a/zk_toolbox/crates/config/src/ecosystem.rs
+++ b/zk_toolbox/crates/config/src/ecosystem.rs
@@ -1,7 +1,10 @@
-use std::{cell::OnceCell, path::PathBuf};
+use std::{
+    cell::OnceCell,
+    path::{Path, PathBuf},
+};
 
-use path_absolutize::Absolutize;
-use serde::{Deserialize, Deserializer, Serialize, Serializer};
+use common::logger;
+use serde::{Deserialize, Serialize, Serializer};
 use thiserror::Error;
 use types::{ChainId, L1Network, ProverMode, WalletCreation};
 use xshell::Shell;
@@ -25,6 +28,7 @@ struct EcosystemConfigInternal {
     pub name: String,
     pub l1_network: L1Network,
     pub link_to_code: PathBuf,
+    pub bellman_cuda_dir: Option<PathBuf>,
     pub chains: PathBuf,
     pub config: PathBuf,
     pub default_chain: String,
@@ -40,6 +44,7 @@ pub struct EcosystemConfig {
     pub name: String,
     pub l1_network: L1Network,
     pub link_to_code: PathBuf,
+    pub bellman_cuda_dir: Option<PathBuf>,
     pub chains: PathBuf,
     pub config: PathBuf,
     pub default_chain: String,
@@ -58,20 +63,18 @@ impl Serialize for EcosystemConfig {
     }
 }
 
-impl<'de> Deserialize<'de> for EcosystemConfig {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let config: EcosystemConfigInternal = Deserialize::deserialize(deserializer)?;
+impl ReadConfig for EcosystemConfig {
+    fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> {
+        let config: EcosystemConfigInternal = EcosystemConfigInternal::read(shell, path)?;
+
+        let bellman_cuda_dir = config
+            .bellman_cuda_dir
+            .map(|dir| shell.current_dir().join(dir));
         Ok(EcosystemConfig {
             name: config.name.clone(),
             l1_network: config.l1_network,
-            link_to_code: config
-                .link_to_code
-                .absolutize()
-                .expect("Failed to parse zksync-era path")
-                .to_path_buf(),
+            link_to_code: shell.current_dir().join(config.link_to_code),
+            bellman_cuda_dir,
             chains: config.chains.clone(),
             config: config.config.clone(),
             default_chain: config.default_chain.clone(),
@@ -93,16 +96,39 @@ impl EcosystemConfig {
     }
 
     pub fn from_file(shell: &Shell) -> Result<Self, EcosystemConfigFromFileError> {
-        let path = PathBuf::from(CONFIG_NAME);
-        if !shell.path_exists(path) {
-            return Err(EcosystemConfigFromFileError::NotExists);
-        }
+        let Ok(path) = find_file(shell, shell.current_dir(), CONFIG_NAME) else {
+            return Err(EcosystemConfigFromFileError::NotExists {
+                path: shell.current_dir(),
+            });
+        };
 
-        let mut config = EcosystemConfig::read(shell, CONFIG_NAME)
-            .map_err(|e| EcosystemConfigFromFileError::InvalidConfig { source: e })?;
-        config.shell = shell.clone().into();
+        shell.change_dir(&path);
 
-        Ok(config)
+        let ecosystem = match EcosystemConfig::read(shell, CONFIG_NAME) {
+            Ok(mut config) => {
+                config.shell = shell.clone().into();
+                config
+            }
+            Err(_) => {
+                // Try to deserialize as a chain config; if that succeeds, we are likely inside a chain
+                // folder, and the ecosystem config should be somewhere in the parent directories.
+                let chain_config = ChainConfigInternal::read(shell, CONFIG_NAME)
+                    .map_err(|err| EcosystemConfigFromFileError::InvalidConfig { source: err })?;
+                logger::info(format!("You are in a directory with chain config, default chain for execution has changed to {}", &chain_config.name));
+
+                let current_dir = shell.current_dir();
+                let Some(parent) = current_dir.parent() else {
+                    return Err(EcosystemConfigFromFileError::NotExists { path });
+                };
+                // Try to find the ecosystem config somewhere in the parent directories.
+                shell.change_dir(parent);
+                let mut ecosystem_config = EcosystemConfig::from_file(shell)?;
+                // Change the default chain so that later commands use it.
+                ecosystem_config.default_chain = chain_config.name;
+                ecosystem_config
+            }
+        };
+        Ok(ecosystem)
    }
 
     pub fn load_chain(&self, name: Option<String>) -> Option<ChainConfig> {
@@ -120,13 +146,10 @@ impl EcosystemConfig {
             chain_id: config.chain_id,
             prover_version: config.prover_version,
             configs: config.configs,
+            external_node_config_path: config.external_node_config_path,
             l1_batch_commit_data_generator_mode: config.l1_batch_commit_data_generator_mode,
             l1_network: self.l1_network,
-            link_to_code: self
-                .link_to_code
-                .absolutize()
-                .expect("Failed to parse zksync-era path")
-                .into(),
+            link_to_code: self.get_shell().current_dir().join(&self.link_to_code),
             base_token: config.base_token,
             rocks_db_path: config.rocks_db_path,
             wallet_creation: config.wallet_creation,
@@ -193,14 +216,15 @@ impl EcosystemConfig {
     }
 
     fn get_internal(&self) -> EcosystemConfigInternal {
+        let bellman_cuda_dir = self
+            .bellman_cuda_dir
+            .clone()
+            .map(|dir| self.get_shell().current_dir().join(dir));
         EcosystemConfigInternal {
             name: self.name.clone(),
             l1_network: self.l1_network,
-            link_to_code: self
-                .link_to_code
-                .absolutize()
-                .expect("Failed to parse zksync-era path")
-                .into(),
+            link_to_code: self.get_shell().current_dir().join(&self.link_to_code),
+            bellman_cuda_dir,
             chains: self.chains.clone(),
             config: self.config.clone(),
             default_chain: self.default_chain.clone(),
@@ -214,8 +238,9 @@ impl EcosystemConfig {
 /// Result of checking if the ecosystem exists.
 #[derive(Error, Debug)]
 pub enum EcosystemConfigFromFileError {
-    #[error("Ecosystem configuration not found")]
-    NotExists,
+    #[error("Ecosystem configuration not found (Could not find 'ZkStack.toml' in {path:?}: Make sure you have created an ecosystem & are in the new folder `cd path/to/ecosystem/name`)"
+    )]
+    NotExists { path: PathBuf },
 
     #[error("Invalid ecosystem configuration")]
     InvalidConfig { source: anyhow::Error },
@@ -223,3 +248,17 @@ pub enum EcosystemConfigFromFileError {
 pub fn get_default_era_chain_id() -> ChainId {
     ERA_CHAIN_ID
 }
+
+// Search the current directory and all of its parents for the given file; return the directory
+// that contains it, or an empty error if nothing has been found.
+fn find_file(shell: &Shell, path_buf: PathBuf, file_name: &str) -> Result<PathBuf, ()> {
+    let _dir = shell.push_dir(path_buf);
+    if shell.path_exists(file_name) {
+        Ok(shell.current_dir())
+    } else {
+        let current_dir = shell.current_dir();
+        let Some(path) = current_dir.parent() else {
+            return Err(());
+        };
+        find_file(shell, path.to_path_buf(), file_name)
+    }
+}
diff --git a/zk_toolbox/crates/config/src/external_node.rs b/zk_toolbox/crates/config/src/external_node.rs
new file mode 100644
index 000000000000..87acb15e4d8c
--- /dev/null
+++ b/zk_toolbox/crates/config/src/external_node.rs
@@ -0,0 +1,23 @@
+use std::num::NonZeroUsize;
+
+use serde::{Deserialize, Serialize};
+use types::{ChainId, L1BatchCommitDataGeneratorMode};
+
+use crate::{consts::EN_CONFIG_FILE, traits::FileConfigWithDefaultName};
+
+#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
+pub struct ENConfig {
+    // Genesis
+    pub l2_chain_id: ChainId,
+    pub l1_chain_id: u32,
+    pub l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode,
+
+    // Main node configuration
+    pub main_node_url: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub main_node_rate_limit_rps: Option<NonZeroUsize>,
+}
+
+impl FileConfigWithDefaultName for ENConfig {
+    const FILE_NAME: &'static str = EN_CONFIG_FILE;
+}
diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs
index 0998d459ba5c..e0ad2ac70cdc 100644
--- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs
+++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs
@@ -79,21 +79,21 @@ impl Default for Erc20DeploymentConfig {
                     symbol: String::from("DAI"),
                     decimals: 18,
                     implementation: String::from("TestnetERC20Token.sol"),
-                    mint: 10000000000,
+                    mint: U256::from_str("9000000000000000000000").unwrap(),
U256::from_str("9000000000000000000000").unwrap(), }, Erc20DeploymentTokensConfig { name: String::from("WBTC"), symbol: String::from("WBTC"), decimals: 8, implementation: String::from("TestnetERC20Token.sol"), - mint: 10000000000, + mint: U256::from_str("9000000000000000000000").unwrap(), }, Erc20DeploymentTokensConfig { name: String::from("Wrapped Ether"), symbol: String::from("WETH"), decimals: 18, implementation: String::from("WETH9.sol"), - mint: 0, + mint: U256::zero(), }, ], } @@ -106,7 +106,7 @@ pub struct Erc20DeploymentTokensConfig { pub symbol: String, pub decimals: u64, pub implementation: String, - pub mint: u64, + pub mint: U256, } #[derive(Debug, Deserialize, Serialize, Clone)] @@ -209,6 +209,7 @@ pub struct DeployErc20Config { pub create2_factory_salt: H256, pub create2_factory_addr: Address, pub tokens: HashMap, + pub additional_addresses_for_minting: Vec

, } impl FileConfig for DeployErc20Config {} @@ -217,6 +218,7 @@ impl DeployErc20Config { pub fn new( erc20_deployment_config: &Erc20DeploymentConfig, contracts_config: &ContractsConfig, + additional_addresses_for_minting: Vec
, ) -> Self { let mut tokens = HashMap::new(); for token in &erc20_deployment_config.tokens { @@ -235,6 +237,7 @@ impl DeployErc20Config { create2_factory_addr: contracts_config.create2_factory_addr, create2_factory_salt: contracts_config.create2_factory_salt, tokens, + additional_addresses_for_minting, } } } @@ -245,5 +248,5 @@ pub struct TokenDeployErc20Config { pub symbol: String, pub decimals: u64, pub implementation: String, - pub mint: u64, + pub mint: U256, } diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs index 1200bf7eab0d..874414ccc1a4 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use ethers::types::{Address, H256}; +use ethers::types::{Address, H256, U256}; use serde::{Deserialize, Serialize}; use crate::{ @@ -85,7 +85,7 @@ pub struct TokenDeployErc20Output { pub symbol: String, pub decimals: u64, pub implementation: String, - pub mint: u64, + pub mint: U256, } #[derive(Debug, Deserialize, Serialize, Clone)] diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs b/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs index 7d105b578b5b..2f39b76c3933 100644 --- a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs +++ b/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs @@ -7,6 +7,7 @@ use crate::traits::FileConfig; pub struct RegisterChainOutput { pub diamond_proxy_addr: Address, pub governance_addr: Address, + pub chain_admin_addr: Address, } impl FileConfig for RegisterChainOutput {} diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs index 058f23bf1b5d..b97384f26f8c 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zk_toolbox/crates/config/src/general.rs @@ -1,17 +1,68 @@ use std::path::PathBuf; use serde::{Deserialize, Serialize}; +use url::Url; use crate::{consts::GENERAL_FILE, traits::FileConfigWithDefaultName}; +pub struct RocksDbs { + pub state_keeper: PathBuf, + pub merkle_tree: PathBuf, +} + #[derive(Debug, Deserialize, Serialize, Clone)] pub struct GeneralConfig { pub db: RocksDBConfig, pub eth: EthConfig, + pub api: ApiConfig, #[serde(flatten)] pub other: serde_json::Value, } +impl GeneralConfig { + pub fn set_rocks_db_config(&mut self, rocks_dbs: RocksDbs) -> anyhow::Result<()> { + self.db.state_keeper_db_path = rocks_dbs.state_keeper; + self.db.merkle_tree.path = rocks_dbs.merkle_tree; + Ok(()) + } + + pub fn ports_config(&self) -> PortsConfig { + PortsConfig { + web3_json_rpc_http_port: self.api.web3_json_rpc.http_port, + web3_json_rpc_ws_port: self.api.web3_json_rpc.ws_port, + healthcheck_port: self.api.healthcheck.port, + merkle_tree_port: self.api.merkle_tree.port, + prometheus_listener_port: self.api.prometheus.listener_port, + } + } + + pub fn update_ports(&mut self, ports_config: &PortsConfig) -> anyhow::Result<()> { + self.api.web3_json_rpc.http_port = ports_config.web3_json_rpc_http_port; + update_port_in_url( + &mut self.api.web3_json_rpc.http_url, + ports_config.web3_json_rpc_http_port, + )?; + self.api.web3_json_rpc.ws_port = ports_config.web3_json_rpc_ws_port; + update_port_in_url( + &mut self.api.web3_json_rpc.ws_url, + ports_config.web3_json_rpc_ws_port, + )?; + self.api.healthcheck.port = ports_config.healthcheck_port; + self.api.merkle_tree.port = 
ports_config.merkle_tree_port; + self.api.prometheus.listener_port = ports_config.prometheus_listener_port; + Ok(()) + } +} + +fn update_port_in_url(http_url: &mut String, port: u16) -> anyhow::Result<()> { + let mut http_url_url = Url::parse(http_url)?; + if let Err(()) = http_url_url.set_port(Some(port)) { + anyhow::bail!("Wrong url, setting port is impossible"); + } + *http_url = http_url_url.as_str().to_string(); + Ok(()) +} + impl FileConfigWithDefaultName for GeneralConfig { const FILE_NAME: &'static str = GENERAL_FILE; } @@ -45,3 +96,88 @@ pub struct EthSender { #[serde(flatten)] pub other: serde_json::Value, } + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct ApiConfig { + /// Configuration options for the Web3 JSON RPC servers. + pub web3_json_rpc: Web3JsonRpcConfig, + /// Configuration options for the Prometheus exporter. + pub prometheus: PrometheusConfig, + /// Configuration options for the Health check. + pub healthcheck: HealthCheckConfig, + /// Configuration options for Merkle tree API. + pub merkle_tree: MerkleTreeApiConfig, + #[serde(flatten)] + pub other: serde_json::Value, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct Web3JsonRpcConfig { + /// Port to which the HTTP RPC server is listening. + pub http_port: u16, + /// URL to access HTTP RPC server. + pub http_url: String, + /// Port to which the WebSocket RPC server is listening. + pub ws_port: u16, + /// URL to access WebSocket RPC server. + pub ws_url: String, + /// Max possible limit of entities to be requested once. + pub req_entities_limit: Option<u32>, + #[serde(flatten)] + pub other: serde_json::Value, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct PrometheusConfig { + /// Port to which the Prometheus exporter server is listening. + pub listener_port: u16, + /// URL of the push gateway. + pub pushgateway_url: String, + /// Push interval in ms. + pub push_interval_ms: Option<u64>, + #[serde(flatten)] + pub other: serde_json::Value, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct HealthCheckConfig { + /// Port to which the REST server is listening. + pub port: u16, + /// Time limit in milliseconds to mark a health check as slow and log the corresponding warning. + /// If not specified, the default value in the health check crate will be used. + pub slow_time_limit_ms: Option<u64>, + /// Time limit in milliseconds to abort a health check and return "not ready" status for the corresponding component. + /// If not specified, the default value in the health check crate will be used. + pub hard_time_limit_ms: Option<u64>, + #[serde(flatten)] + pub other: serde_json::Value, +} + +/// Configuration for the Merkle tree API. +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct MerkleTreeApiConfig { + /// Port to bind the Merkle tree API server to.
+ pub port: u16, + #[serde(flatten)] + pub other: serde_json::Value, +} + +pub struct PortsConfig { + pub web3_json_rpc_http_port: u16, + pub web3_json_rpc_ws_port: u16, + pub healthcheck_port: u16, + pub merkle_tree_port: u16, + pub prometheus_listener_port: u16, +} + +impl PortsConfig { + pub fn next_empty_ports_config(&self) -> PortsConfig { + Self { + web3_json_rpc_http_port: self.web3_json_rpc_http_port + 100, + web3_json_rpc_ws_port: self.web3_json_rpc_ws_port + 100, + healthcheck_port: self.healthcheck_port + 100, + merkle_tree_port: self.merkle_tree_port + 100, + prometheus_listener_port: self.prometheus_listener_port + 100, + } + } +} diff --git a/zk_toolbox/crates/config/src/genesis.rs b/zk_toolbox/crates/config/src/genesis.rs index 4e3d931ea0f0..e666931870a8 100644 --- a/zk_toolbox/crates/config/src/genesis.rs +++ b/zk_toolbox/crates/config/src/genesis.rs @@ -2,7 +2,7 @@ use ethers::types::{Address, H256}; use serde::{Deserialize, Serialize}; use types::{ChainId, L1BatchCommitDataGeneratorMode, ProtocolSemanticVersion}; -use crate::{consts::GENESIS_FILE, traits::FileConfigWithDefaultName}; +use crate::{consts::GENESIS_FILE, traits::FileConfigWithDefaultName, ChainConfig}; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct GenesisConfig { @@ -21,6 +21,14 @@ pub struct GenesisConfig { pub other: serde_json::Value, } +impl GenesisConfig { + pub fn update_from_chain_config(&mut self, config: &ChainConfig) { + self.l2_chain_id = config.chain_id; + self.l1_chain_id = config.l1_network.chain_id(); + self.l1_batch_commit_data_generator_mode = Some(config.l1_batch_commit_data_generator_mode); + } +} + impl FileConfigWithDefaultName for GenesisConfig { const FILE_NAME: &'static str = GENESIS_FILE; } diff --git a/zk_toolbox/crates/config/src/lib.rs b/zk_toolbox/crates/config/src/lib.rs index 8e40da7bf6bd..a80a2b6fe5de 100644 --- a/zk_toolbox/crates/config/src/lib.rs +++ b/zk_toolbox/crates/config/src/lib.rs @@ -1,3 +1,15 @@ +pub use chain::*; +pub use consts::{DOCKER_COMPOSE_FILE, ZKSYNC_ERA_GIT_REPO}; +pub use contracts::*; +pub use ecosystem::*; +pub use file_config::*; +pub use general::*; +pub use genesis::*; +pub use manipulations::*; +pub use secrets::*; +pub use wallet_creation::*; +pub use wallets::*; + mod chain; mod consts; mod contracts; @@ -10,17 +22,6 @@ mod secrets; mod wallet_creation; mod wallets; +pub mod external_node; pub mod forge_interface; pub mod traits; - -pub use chain::*; -pub use consts::{DOCKER_COMPOSE_FILE, ZKSYNC_ERA_GIT_REPO}; -pub use contracts::*; -pub use ecosystem::*; -pub use file_config::*; -pub use general::*; -pub use genesis::*; -pub use manipulations::*; -pub use secrets::*; -pub use wallet_creation::*; -pub use wallets::*; diff --git a/zk_toolbox/crates/config/src/secrets.rs b/zk_toolbox/crates/config/src/secrets.rs index ebacc5d437cb..98a9be6ffe61 100644 --- a/zk_toolbox/crates/config/src/secrets.rs +++ b/zk_toolbox/crates/config/src/secrets.rs @@ -1,3 +1,4 @@ +use common::db::DatabaseConfig; use serde::{Deserialize, Serialize}; use url::Url; @@ -6,7 +7,8 @@ use crate::{consts::SECRETS_FILE, traits::FileConfigWithDefaultName}; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct DatabaseSecrets { pub server_url: Url, - pub prover_url: Url, + #[serde(skip_serializing_if = "Option::is_none")] + pub prover_url: Option<Url>, #[serde(flatten)] pub other: serde_json::Value, } @@ -26,6 +28,21 @@ pub struct SecretsConfig { pub other: serde_json::Value, } +impl SecretsConfig { + pub fn set_databases( + &mut self, + server_db_config:
&DatabaseConfig, + prover_db_config: &DatabaseConfig, + ) { + self.database.server_url = server_db_config.full_url(); + self.database.prover_url = Some(prover_db_config.full_url()); + } + + pub fn set_l1_rpc_url(&mut self, l1_rpc_url: String) { + self.l1.l1_rpc_url = l1_rpc_url; + } +} + impl FileConfigWithDefaultName for SecretsConfig { const FILE_NAME: &'static str = SECRETS_FILE; } diff --git a/zk_toolbox/crates/config/src/traits.rs b/zk_toolbox/crates/config/src/traits.rs index 85c73e99f99b..772c5d964dab 100644 --- a/zk_toolbox/crates/config/src/traits.rs +++ b/zk_toolbox/crates/config/src/traits.rs @@ -18,16 +18,24 @@ pub trait FileConfigWithDefaultName { } impl FileConfig for T where T: FileConfigWithDefaultName {} -impl ReadConfig for T where T: FileConfig + Clone + DeserializeOwned {} + impl SaveConfig for T where T: FileConfig + Serialize {} + impl SaveConfigWithComment for T where T: FileConfig + Serialize {} + impl ReadConfigWithBasePath for T where T: FileConfigWithDefaultName + Clone + DeserializeOwned {} + impl SaveConfigWithBasePath for T where T: FileConfigWithDefaultName + Serialize {} + impl SaveConfigWithCommentAndBasePath for T where T: FileConfigWithDefaultName + Serialize {} +pub trait ReadConfig: Sized { + fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result; +} + /// Reads a config file from a given path, correctly parsing file extension. /// Supported file extensions are: `yaml`, `yml`, `toml`, `json`. -pub trait ReadConfig: DeserializeOwned + Clone { +impl ReadConfig for T { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let error_context = || format!("Failed to parse config file {:?}.", path.as_ref()); diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zk_toolbox/crates/zk_inception/Cargo.toml index ff22e982e3cc..3a8b57e162f6 100644 --- a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zk_toolbox/crates/zk_inception/Cargo.toml @@ -31,3 +31,5 @@ strum.workspace = true toml.workspace = true url.workspace = true thiserror.workspace = true +zksync_config.workspace = true +slugify-rs.workspace = true diff --git a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs b/zk_toolbox/crates/zk_inception/src/accept_ownership.rs index 830da513d4f0..567506aef670 100644 --- a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs +++ b/zk_toolbox/crates/zk_inception/src/accept_ownership.rs @@ -2,30 +2,48 @@ use common::{ forge::{Forge, ForgeScript, ForgeScriptArgs}, spinner::Spinner, }; -use config::{ - forge_interface::{ - accept_ownership::AcceptOwnershipInput, script_params::ACCEPT_GOVERNANCE_SCRIPT_PARAMS, - }, - traits::SaveConfig, - EcosystemConfig, +use config::{forge_interface::script_params::ACCEPT_GOVERNANCE_SCRIPT_PARAMS, EcosystemConfig}; +use ethers::{ + abi::parse_abi, + contract::BaseContract, + types::{Address, H256}, }; -use ethers::types::{Address, H256}; +use lazy_static::lazy_static; use xshell::Shell; use crate::{ - forge_utils::{check_the_balance, fill_forge_private_key}, messages::MSG_ACCEPTING_GOVERNANCE_SPINNER, + utils::forge::{check_the_balance, fill_forge_private_key}, }; +lazy_static! 
{ + static ref ACCEPT_ADMIN: BaseContract = BaseContract::from( + parse_abi(&[ + "function acceptOwner(address governor, address target) public", + "function acceptAdmin(address admin, address target) public" + ]) + .unwrap(), + ); +} + pub async fn accept_admin( shell: &Shell, ecosystem_config: &EcosystemConfig, - governor_contract: Address, + admin: Address, governor: Option<H256>, target_address: Address, forge_args: &ForgeScriptArgs, l1_rpc_url: String, ) -> anyhow::Result<()> { + // Resume doesn't work properly for accept admin. Foundry assumes that if the function signature is the same, + // then it's the same call; but because we call this function multiple times during the init process, + // it would conclude that running it once is enough, while we actually need to accept admin multiple times. + let mut forge_args = forge_args.clone(); + forge_args.resume = false; + + let calldata = ACCEPT_ADMIN + .encode("acceptAdmin", (admin, target_address)) + .unwrap(); let foundry_contracts_path = ecosystem_config.path_to_foundry(); let forge = Forge::new(&foundry_contracts_path) .script( @@ -35,16 +53,8 @@ pub async fn accept_admin( .with_ffi() .with_rpc_url(l1_rpc_url) .with_broadcast() - .with_signature("acceptAdmin()"); - accept_ownership( - shell, - ecosystem_config, - governor_contract, - governor, - target_address, - forge, - ) - .await + .with_calldata(&calldata); + accept_ownership(shell, governor, forge).await } pub async fn accept_owner( @@ -56,6 +66,13 @@ forge_args: &ForgeScriptArgs, l1_rpc_url: String, ) -> anyhow::Result<()> { + // Resume doesn't work properly here either. + let mut forge_args = forge_args.clone(); + forge_args.resume = false; + + let calldata = ACCEPT_ADMIN + .encode("acceptOwner", (governor_contract, target_address)) + .unwrap(); let foundry_contracts_path = ecosystem_config.path_to_foundry(); let forge = Forge::new(&foundry_contracts_path) .script( @@ -65,37 +82,16 @@ pub async fn accept_owner( .with_ffi() .with_rpc_url(l1_rpc_url) .with_broadcast() - .with_signature("acceptOwner()"); - accept_ownership( - shell, - ecosystem_config, - governor_contract, - governor, - target_address, - forge, - ) - .await + .with_calldata(&calldata); + accept_ownership(shell, governor, forge).await } async fn accept_ownership( shell: &Shell, - ecosystem_config: &EcosystemConfig, - governor_contract: Address, governor: Option<H256>, - target_address: Address, mut forge: ForgeScript, ) -> anyhow::Result<()> { - let input = AcceptOwnershipInput { - target_addr: target_address, - governor: governor_contract, - }; - input.save( - shell, - ACCEPT_GOVERNANCE_SCRIPT_PARAMS.input(&ecosystem_config.link_to_code), - )?; - forge = fill_forge_private_key(forge, governor)?; check_the_balance(&forge).await?; let spinner = Spinner::new(MSG_ACCEPTING_GOVERNANCE_SPINNER); forge.run(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs index bf1457ba92c6..7b21015691b9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs @@ -1,3 +1,3 @@ -mod run_server; - pub use run_server::*; + +mod run_server; diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs b/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs index 1ec211c25f6d..1e373319ec73 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs @@ -2,7 +2,8 @@
use clap::Parser; use serde::{Deserialize, Serialize}; use crate::messages::{ - MSG_SERVER_ADDITIONAL_ARGS_HELP, MSG_SERVER_COMPONENTS_HELP, MSG_SERVER_GENESIS_HELP, + MSG_SERVER_ADDITIONAL_ARGS_HELP, MSG_SERVER_BUILD_HELP, MSG_SERVER_COMPONENTS_HELP, + MSG_SERVER_GENESIS_HELP, }; #[derive(Debug, Serialize, Deserialize, Parser)] @@ -14,4 +15,6 @@ pub struct RunServerArgs { #[clap(long, short)] #[arg(trailing_var_arg = true, allow_hyphen_values = true, hide = false, help = MSG_SERVER_ADDITIONAL_ARGS_HELP)] additional_args: Vec<String>, + #[clap(long, help = MSG_SERVER_BUILD_HELP)] + pub build: bool, } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs index 986482df80b2..97a3de69c738 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs @@ -1,8 +1,9 @@ use std::{path::PathBuf, str::FromStr}; use clap::Parser; -use common::{slugify, Prompt, PromptConfirm, PromptSelect}; +use common::{Prompt, PromptConfirm, PromptSelect}; use serde::{Deserialize, Serialize}; +use slugify_rs::slugify; use strum::IntoEnumIterator; use strum_macros::{Display, EnumIter}; use types::{BaseToken, L1BatchCommitDataGeneratorMode, L1Network, ProverMode, WalletCreation}; @@ -26,7 +27,7 @@ use crate::{ pub struct ChainCreateArgs { #[arg(long)] pub chain_name: Option<String>, - #[arg(value_parser = clap::value_parser!(u32).range(1..))] + #[arg(value_parser = clap::value_parser ! (u32).range(1..))] pub chain_id: Option<u32>, #[clap(long, help = MSG_PROVER_MODE_HELP, value_enum)] pub prover_mode: Option<ProverMode>, @@ -55,7 +56,7 @@ impl ChainCreateArgs { let mut chain_name = self .chain_name .unwrap_or_else(|| Prompt::new(MSG_CHAIN_NAME_PROMPT).ask()); - chain_name = slugify(&chain_name); + chain_name = slugify!(&chain_name, separator = "_"); let chain_id = self.chain_id.unwrap_or_else(|| { Prompt::new(MSG_CHAIN_ID_PROMPT) diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs index d835b1eb36a6..483b78e9b267 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs @@ -1,15 +1,16 @@ use clap::Parser; -use common::{db::DatabaseConfig, slugify, Prompt}; +use common::{db::DatabaseConfig, Prompt}; use config::ChainConfig; use serde::{Deserialize, Serialize}; +use slugify_rs::slugify; use url::Url; use crate::{ defaults::{generate_db_names, DBNames, DATABASE_PROVER_URL, DATABASE_SERVER_URL}, messages::{ msg_prover_db_name_prompt, msg_prover_db_url_prompt, msg_server_db_name_prompt, - msg_server_db_url_prompt, MSG_GENESIS_USE_DEFAULT_HELP, MSG_PROVER_DB_NAME_HELP, - MSG_PROVER_DB_URL_HELP, MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, + msg_server_db_url_prompt, MSG_PROVER_DB_NAME_HELP, MSG_PROVER_DB_URL_HELP, + MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, MSG_USE_DEFAULT_DATABASES_HELP, }, }; @@ -23,7 +24,7 @@ pub struct GenesisArgs { pub prover_db_url: Option<Url>, #[clap(long, help = MSG_PROVER_DB_NAME_HELP)] pub prover_db_name: Option<String>, - #[clap(long, short, help = MSG_GENESIS_USE_DEFAULT_HELP)] + #[clap(long, short, help = MSG_USE_DEFAULT_DATABASES_HELP)] pub use_default: bool, #[clap(long, short, action)] pub dont_drop: bool, @@ -48,21 +49,27 @@ impl GenesisArgs { .default(DATABASE_SERVER_URL.as_str()) .ask() }); - let server_db_name = slugify(&self.server_db_name.unwrap_or_else(|| { -
Prompt::new(&msg_server_db_name_prompt(&chain_name)) - .default(&server_name) - .ask() - })); + let server_db_name = slugify!( + &self.server_db_name.unwrap_or_else(|| { + Prompt::new(&msg_server_db_name_prompt(&chain_name)) + .default(&server_name) + .ask() + }), + separator = "_" + ); let prover_db_url = self.prover_db_url.unwrap_or_else(|| { Prompt::new(&msg_prover_db_url_prompt(&chain_name)) .default(DATABASE_PROVER_URL.as_str()) .ask() }); - let prover_db_name = slugify(&self.prover_db_name.unwrap_or_else(|| { - Prompt::new(&msg_prover_db_name_prompt(&chain_name)) - .default(&prover_name) - .ask() - })); + let prover_db_name = slugify!( + &self.prover_db_name.unwrap_or_else(|| { + Prompt::new(&msg_prover_db_name_prompt(&chain_name)) + .default(&prover_name) + .ask() + }), + separator = "_" + ); GenesisArgsFinal { server_db: DatabaseConfig::new(server_db_url, server_db_name), prover_db: DatabaseConfig::new(prover_db_url, prover_db_name), diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs index f915a3b8d6f6..dc8f408db3b3 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs @@ -68,6 +68,7 @@ pub(crate) fn create_chain_inner( link_to_code: ecosystem_config.link_to_code.clone(), rocks_db_path: ecosystem_config.get_chain_rocks_db_path(&default_chain_name), configs: chain_configs_path.clone(), + external_node_config_path: None, l1_batch_commit_data_generator_mode: args.l1_batch_commit_data_generator_mode, base_token: args.base_token, wallet_creation: args.wallet_creation, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs index fe8dcdc562b2..4f82a92c2edc 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs @@ -9,15 +9,14 @@ use config::{ paymaster::{DeployPaymasterInput, DeployPaymasterOutput}, script_params::DEPLOY_PAYMASTER_SCRIPT_PARAMS, }, - traits::{ReadConfig, SaveConfig}, - ChainConfig, EcosystemConfig, + traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, + ChainConfig, ContractsConfig, EcosystemConfig, }; use xshell::Shell; use crate::{ - config_manipulations::update_paymaster, - forge_utils::{check_the_balance, fill_forge_private_key}, messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_DEPLOYING_PAYMASTER}, + utils::forge::{check_the_balance, fill_forge_private_key}, }; pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { @@ -26,12 +25,15 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { let chain_config = ecosystem_config .load_chain(chain_name) .context(MSG_CHAIN_NOT_INITIALIZED)?; - deploy_paymaster(shell, &chain_config, args).await + let mut contracts = chain_config.get_contracts_config()?; + deploy_paymaster(shell, &chain_config, &mut contracts, args).await?; + contracts.save_with_base_path(shell, chain_config.configs) } pub async fn deploy_paymaster( shell: &Shell, chain_config: &ChainConfig, + contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, ) -> anyhow::Result<()> { let input = DeployPaymasterInput::new(chain_config)?; @@ -63,6 +65,6 @@ pub async fn deploy_paymaster( DEPLOY_PAYMASTER_SCRIPT_PARAMS.output(&chain_config.link_to_code), )?; - update_paymaster(shell, chain_config, &output)?; + 
contracts_config.l2.testnet_paymaster_addr = output.paymaster; Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs index 8c4edc88290d..b42a1138229d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs @@ -5,28 +5,32 @@ use common::{ config::global_config, db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, logger, + server::{Server, ServerMode}, spinner::Spinner, }; -use config::{ChainConfig, EcosystemConfig}; +use config::{ + traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, + ChainConfig, ContractsConfig, EcosystemConfig, GeneralConfig, GenesisConfig, SecretsConfig, + WalletsConfig, +}; +use types::ProverMode; use xshell::Shell; use super::args::genesis::GenesisArgsFinal; use crate::{ commands::chain::args::genesis::GenesisArgs, - config_manipulations::{update_database_secrets, update_general_config}, + consts::{PROVER_MIGRATIONS, SERVER_MIGRATIONS}, messages::{ MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, - MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_GENESIS_COMPLETED, - MSG_INITIALIZING_DATABASES_SPINNER, MSG_INITIALIZING_PROVER_DATABASE, - MSG_INITIALIZING_SERVER_DATABASE, MSG_SELECTED_CONFIG, MSG_STARTING_GENESIS, + MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_FAILED_TO_RUN_SERVER_ERR, + MSG_GENESIS_COMPLETED, MSG_INITIALIZING_DATABASES_SPINNER, + MSG_INITIALIZING_PROVER_DATABASE, MSG_INITIALIZING_SERVER_DATABASE, + MSG_RECREATE_ROCKS_DB_ERRROR, MSG_SELECTED_CONFIG, MSG_STARTING_GENESIS, MSG_STARTING_GENESIS_SPINNER, }, - server::{RunServer, ServerMode}, + utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }; -const SERVER_MIGRATIONS: &str = "core/lib/dal/migrations"; -const PROVER_MIGRATIONS: &str = "prover/prover_dal/migrations"; - pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { let chain_name = global_config().chain_name.clone(); let ecosystem_config = EcosystemConfig::from_file(shell)?; @@ -46,12 +50,20 @@ pub async fn genesis( shell: &Shell, config: &ChainConfig, ) -> anyhow::Result<()> { - // Clean the rocksdb - shell.remove_path(&config.rocks_db_path)?; shell.create_dir(&config.rocks_db_path)?; - update_general_config(shell, config)?; - update_database_secrets(shell, config, &args.server_db, &args.prover_db)?; + let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main) + .context(MSG_RECREATE_ROCKS_DB_ERRROR)?; + let mut general = config.get_general_config()?; + general.set_rocks_db_config(rocks_db)?; + if config.prover_version != ProverMode::NoProofs { + general.eth.sender.proof_sending_mode = "ONLY_REAL_PROOFS".to_string(); + } + general.save_with_base_path(shell, &config.configs)?; + + let mut secrets = config.get_secrets_config()?; + secrets.set_databases(&args.server_db, &args.prover_db); + secrets.save_with_base_path(shell, &config.configs)?; logger::note( MSG_SELECTED_CONFIG, @@ -127,6 +139,17 @@ async fn initialize_databases( } fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> { - let server = RunServer::new(None, chain_config); - server.run(shell, ServerMode::Genesis) + let server = Server::new(None, chain_config.link_to_code.clone()); + server + .run( + shell, + ServerMode::Genesis, + GenesisConfig::get_path_with_base_path(&chain_config.configs), + WalletsConfig::get_path_with_base_path(&chain_config.configs), + 
GeneralConfig::get_path_with_base_path(&chain_config.configs), + SecretsConfig::get_path_with_base_path(&chain_config.configs), + ContractsConfig::get_path_with_base_path(&chain_config.configs), + vec![], + ) + .context(MSG_FAILED_TO_RUN_SERVER_ERR) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs index 0c9ac8743eee..640f4a492869 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs @@ -2,7 +2,7 @@ use anyhow::Context; use common::{ config::global_config, forge::{Forge, ForgeScriptArgs}, - logger, + git, logger, spinner::Spinner, }; use config::{ @@ -11,24 +11,25 @@ use config::{ register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput}, script_params::REGISTER_CHAIN_SCRIPT_PARAMS, }, - traits::{ReadConfig, ReadConfigWithBasePath, SaveConfig, SaveConfigWithBasePath}, + traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, ChainConfig, ContractsConfig, EcosystemConfig, }; use xshell::Shell; -use super::args::init::InitArgsFinal; use crate::{ accept_ownership::accept_admin, commands::chain::{ - args::init::InitArgs, deploy_paymaster, genesis::genesis, initialize_bridges, + args::init::{InitArgs, InitArgsFinal}, + deploy_paymaster, + genesis::genesis, + initialize_bridges, }, - config_manipulations::{update_genesis, update_l1_contracts, update_l1_rpc_url_secret}, - forge_utils::{check_the_balance, fill_forge_private_key}, messages::{ msg_initializing_chain, MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_INITIALIZED, - MSG_CHAIN_NOT_FOUND_ERR, MSG_CONTRACTS_CONFIG_NOT_FOUND_ERR, MSG_GENESIS_DATABASE_ERR, - MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, + MSG_CHAIN_NOT_FOUND_ERR, MSG_GENESIS_DATABASE_ERR, MSG_REGISTERING_CHAIN_SPINNER, + MSG_SELECTED_CONFIG, }, + utils::forge::{check_the_balance, fill_forge_private_key}, }; pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { @@ -41,6 +42,7 @@ pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config)); logger::info(msg_initializing_chain("")); + git::submodule_update(shell, config.link_to_code.clone())?; init(&mut args, shell, &config, &chain_config).await?; @@ -56,29 +58,42 @@ pub async fn init( ) -> anyhow::Result<()> { copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; - update_genesis(shell, chain_config)?; - update_l1_rpc_url_secret(shell, chain_config, init_args.l1_rpc_url.clone())?; - let mut contracts_config = - ContractsConfig::read_with_base_path(shell, &ecosystem_config.config)?; - contracts_config.l1.base_token_addr = chain_config.base_token.address; + let mut genesis_config = chain_config.get_genesis_config()?; + genesis_config.update_from_chain_config(chain_config); + genesis_config.save_with_base_path(shell, &chain_config.configs)?; + // Copy ecosystem contracts + let mut contracts_config = ecosystem_config.get_contracts_config()?; + contracts_config.l1.base_token_addr = chain_config.base_token.address; contracts_config.save_with_base_path(shell, &chain_config.configs)?; + crate::commands::ecosystem::init::distribute_eth( + ecosystem_config, + chain_config, + init_args.l1_rpc_url.clone(), + ) + .await?; + let mut secrets = chain_config.get_secrets_config()?; + secrets.set_l1_rpc_url(init_args.l1_rpc_url.clone()); + secrets.save_with_base_path(shell, &chain_config.configs)?; + let spinner = 
Spinner::new(MSG_REGISTERING_CHAIN_SPINNER); - contracts_config = register_chain( + register_chain( shell, init_args.forge_args.clone(), ecosystem_config, chain_config, + &mut contracts_config, init_args.l1_rpc_url.clone(), ) .await?; + contracts_config.save_with_base_path(shell, &chain_config.configs)?; spinner.finish(); let spinner = Spinner::new(MSG_ACCEPTING_ADMIN_SPINNER); accept_admin( shell, ecosystem_config, - contracts_config.l1.governance_addr, + contracts_config.l1.chain_admin_addr, chain_config.get_wallets_config()?.governor_private_key(), contracts_config.l1.diamond_proxy_addr, &init_args.forge_args.clone(), @@ -91,13 +106,21 @@ pub async fn init( shell, chain_config, ecosystem_config, + &mut contracts_config, init_args.forge_args.clone(), ) .await?; + contracts_config.save_with_base_path(shell, &chain_config.configs)?; if init_args.deploy_paymaster { - deploy_paymaster::deploy_paymaster(shell, chain_config, init_args.forge_args.clone()) - .await?; + deploy_paymaster::deploy_paymaster( + shell, + chain_config, + &mut contracts_config, + init_args.forge_args.clone(), + ) + .await?; + contracts_config.save_with_base_path(shell, &chain_config.configs)?; } genesis(init_args.genesis_args.clone(), shell, chain_config) @@ -112,14 +135,12 @@ async fn register_chain( shell: &Shell, forge_args: ForgeScriptArgs, config: &EcosystemConfig, chain_config: &ChainConfig, + contracts: &mut ContractsConfig, l1_rpc_url: String, -) -> anyhow::Result<ContractsConfig> { +) -> anyhow::Result<()> { let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code); - let contracts = config .get_contracts_config() .context(MSG_CONTRACTS_CONFIG_NOT_FOUND_ERR)?; - let deploy_config = RegisterChainL1Config::new(chain_config, &contracts)?; + let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?; deploy_config.save(shell, deploy_config_path)?; let mut forge = Forge::new(&config.path_to_foundry()) @@ -136,5 +157,6 @@ async fn register_chain( shell, REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code), )?; - update_l1_contracts(shell, chain_config, &register_chain_output) + contracts.set_chain_contracts(&register_chain_output); + Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/initialize_bridges.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/initialize_bridges.rs index 4a81a2b26f1b..e81971eba7cb 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/initialize_bridges.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/initialize_bridges.rs @@ -12,15 +12,14 @@ use config::{ initialize_bridges::{input::InitializeBridgeInput, output::InitializeBridgeOutput}, script_params::INITIALIZE_BRIDGES_SCRIPT_PARAMS, }, - traits::{ReadConfig, SaveConfig}, - ChainConfig, EcosystemConfig, + traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, + ChainConfig, ContractsConfig, EcosystemConfig, }; use xshell::{cmd, Shell}; use crate::{ - config_manipulations::update_l2_shared_bridge, - forge_utils::{check_the_balance, fill_forge_private_key}, messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_INITIALIZING_BRIDGES_SPINNER}, + utils::forge::{check_the_balance, fill_forge_private_key}, }; pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { @@ -30,8 +29,17 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { .load_chain(chain_name) .context(MSG_CHAIN_NOT_INITIALIZED)?; + let mut contracts = chain_config.get_contracts_config()?; let spinner = Spinner::new(MSG_INITIALIZING_BRIDGES_SPINNER); - initialize_bridges(shell,
&chain_config, &ecosystem_config, args).await?; + initialize_bridges( + shell, + &chain_config, + &ecosystem_config, + &mut contracts, + args, + ) + .await?; + contracts.save_with_base_path(shell, &chain_config.configs)?; spinner.finish(); Ok(()) @@ -41,6 +49,7 @@ pub async fn initialize_bridges( shell: &Shell, chain_config: &ChainConfig, ecosystem_config: &EcosystemConfig, + contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, ) -> anyhow::Result<()> { build_l2_contracts(shell, &ecosystem_config.link_to_code)?; @@ -74,11 +83,11 @@ pub async fn initialize_bridges( INITIALIZE_BRIDGES_SCRIPT_PARAMS.output(&chain_config.link_to_code), )?; - update_l2_shared_bridge(shell, chain_config, &output)?; + contracts_config.set_l2_shared_bridge(&output)?; Ok(()) } fn build_l2_contracts(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> { let _dir_guard = shell.push_dir(link_to_code.join("contracts")); - Cmd::new(cmd!(shell, "yarn l2 build")).run() + Ok(Cmd::new(cmd!(shell, "yarn l2 build")).run()?) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs index 759b4aaea557..fa4f81d76312 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs @@ -1,10 +1,3 @@ -pub(crate) mod args; -mod create; -pub mod deploy_paymaster; -pub mod genesis; -pub(crate) mod init; -mod initialize_bridges; - pub(crate) use args::create::ChainCreateArgsFinal; use clap::Subcommand; use common::forge::ForgeScriptArgs; @@ -13,6 +6,13 @@ use xshell::Shell; use crate::commands::chain::args::{create::ChainCreateArgs, genesis::GenesisArgs, init::InitArgs}; +pub(crate) mod args; +mod create; +pub mod deploy_paymaster; +pub mod genesis; +pub(crate) mod init; +mod initialize_bridges; + #[derive(Subcommand, Debug)] pub enum ChainCommands { /// Create a new chain, setting the necessary configurations for later initialization @@ -22,8 +22,10 @@ pub enum ChainCommands { /// Run server genesis Genesis(GenesisArgs), /// Initialize bridges on l2 + #[command(alias = "bridge")] InitializeBridges(ForgeScriptArgs), /// Initialize bridges on l2 + #[command(alias = "paymaster")] DeployPaymaster(ForgeScriptArgs), } diff --git a/zk_toolbox/crates/zk_inception/src/commands/containers.rs b/zk_toolbox/crates/zk_inception/src/commands/containers.rs index bba19fb89f94..b34b598afbe1 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/containers.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/containers.rs @@ -74,5 +74,8 @@ fn copy_dockerfile(shell: &Shell, link_to_code: PathBuf) -> anyhow::Result<()> { let data = docker_compose_text.replace(original_source, new_source); shell.write_file(DOCKER_COMPOSE_FILE, data)?; + // For some reasons our docker-compose sometimes required .env file while we are investigating this behaviour + // it's better to create file and don't make the life of customers harder + shell.write_file(".env", "")?; Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs new file mode 100644 index 000000000000..c74e4a4f765e --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs @@ -0,0 +1,169 @@ +use anyhow::Context; +use clap::Parser; +use common::PromptSelect; +use xshell::Shell; + +use super::releases::{get_releases_with_arch, Arch, Version}; +use crate::messages::{ + MSG_ARCH_NOT_SUPPORTED_ERR, 
MSG_FETCHING_VYPER_RELEASES_SPINNER, + MSG_FETCHING_ZKSOLC_RELEASES_SPINNER, MSG_FETCHING_ZKVYPER_RELEASES_SPINNER, + MSG_FETCH_SOLC_RELEASES_SPINNER, MSG_GET_SOLC_RELEASES_ERR, MSG_GET_VYPER_RELEASES_ERR, + MSG_GET_ZKSOLC_RELEASES_ERR, MSG_GET_ZKVYPER_RELEASES_ERR, MSG_NO_VERSION_FOUND_ERR, + MSG_OS_NOT_SUPPORTED_ERR, MSG_SOLC_VERSION_PROMPT, MSG_VYPER_VERSION_PROMPT, + MSG_ZKSOLC_VERSION_PROMPT, MSG_ZKVYPER_VERSION_PROMPT, +}; + +#[derive(Debug, Clone, Parser, Default)] +pub struct InitContractVerifierArgs { + /// Version of zksolc to install + #[clap(long)] + pub zksolc_version: Option<String>, + /// Version of zkvyper to install + #[clap(long)] + pub zkvyper_version: Option<String>, + /// Version of solc to install + #[clap(long)] + pub solc_version: Option<String>, + /// Version of vyper to install + #[clap(long)] + pub vyper_version: Option<String>, +} + +#[derive(Debug, Clone)] +pub struct InitContractVerifierArgsFinal { + pub zksolc_releases: Vec<Version>, + pub zkvyper_releases: Vec<Version>, + pub solc_releases: Vec<Version>, + pub vyper_releases: Vec<Version>, +} + +impl InitContractVerifierArgs { + pub fn fill_values_with_prompt( + self, + shell: &Shell, + ) -> anyhow::Result<InitContractVerifierArgsFinal> { + let arch = get_arch()?; + + let zksolc_releases = get_releases_with_arch( + shell, + "matter-labs/zksolc-bin", + arch, + MSG_FETCHING_ZKSOLC_RELEASES_SPINNER, + ) + .context(MSG_GET_ZKSOLC_RELEASES_ERR)?; + + let zkvyper_releases = get_releases_with_arch( + shell, + "matter-labs/zkvyper-bin", + arch, + MSG_FETCHING_ZKVYPER_RELEASES_SPINNER, + ) + .context(MSG_GET_ZKVYPER_RELEASES_ERR)?; + + let solc_releases = get_releases_with_arch( + shell, + "ethereum/solc-bin", + arch, + MSG_FETCH_SOLC_RELEASES_SPINNER, + ) + .context(MSG_GET_SOLC_RELEASES_ERR)?; + + let vyper_releases = get_releases_with_arch( + shell, + "vyperlang/vyper", + arch, + MSG_FETCHING_VYPER_RELEASES_SPINNER, + ) + .context(MSG_GET_VYPER_RELEASES_ERR)?; + + let zksolc_version = select_min_version( + self.zksolc_version, + zksolc_releases.clone(), + MSG_ZKSOLC_VERSION_PROMPT, + )?; + let zksolc_releases = get_releases_above_version(zksolc_releases, zksolc_version)?; + + let zkvyper_version = select_min_version( + self.zkvyper_version, + zkvyper_releases.clone(), + MSG_ZKVYPER_VERSION_PROMPT, + )?; + let zkvyper_releases = get_releases_above_version(zkvyper_releases, zkvyper_version)?; + + let solc_version = select_min_version( + self.solc_version, + solc_releases.clone(), + MSG_SOLC_VERSION_PROMPT, + )?; + let solc_releases = get_releases_above_version(solc_releases, solc_version)?; + + let vyper_version = select_min_version( + self.vyper_version, + vyper_releases.clone(), + MSG_VYPER_VERSION_PROMPT, + )?; + let vyper_releases = get_releases_above_version(vyper_releases, vyper_version)?; + + Ok(InitContractVerifierArgsFinal { + zksolc_releases, + zkvyper_releases, + solc_releases, + vyper_releases, + }) + } +} + +fn get_arch() -> anyhow::Result<Arch> { + let os = std::env::consts::OS; + let arch = std::env::consts::ARCH; + + let arch = match os { + "linux" => match arch { + "x86_64" => Arch::LinuxAmd, + "aarch64" => Arch::LinuxArm, + "arm" => Arch::LinuxArm, + _ => anyhow::bail!(MSG_ARCH_NOT_SUPPORTED_ERR), + }, + "macos" => match arch { + "x86_64" => Arch::MacosAmd, + "aarch64" => Arch::MacosArm, + "arm" => Arch::MacosArm, + _ => anyhow::bail!(MSG_ARCH_NOT_SUPPORTED_ERR), + }, + _ => anyhow::bail!(MSG_OS_NOT_SUPPORTED_ERR), + }; + + Ok(arch) +} + +fn select_min_version( + selected: Option<String>, + versions: Vec<Version>, + prompt_msg: &str, +) -> anyhow::Result<Version> { + let selected = selected.unwrap_or_else(|| {
PromptSelect::new(prompt_msg, versions.iter().map(|r| &r.version)) + .ask() + .into() + }); + + let selected = versions + .iter() + .find(|r| r.version == selected) + .context(MSG_NO_VERSION_FOUND_ERR)? + .to_owned(); + + Ok(selected) +} + +fn get_releases_above_version( + releases: Vec<Version>, + version: Version, +) -> anyhow::Result<Vec<Version>> { + let pos = releases + .iter() + .position(|r| r.version == version.version) + .context(MSG_NO_VERSION_FOUND_ERR)?; + + Ok(releases[..=pos].to_vec()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/mod.rs new file mode 100644 index 000000000000..7f5df830d114 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/mod.rs @@ -0,0 +1,2 @@ +pub mod init; +pub mod releases; diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs new file mode 100644 index 000000000000..6f7eae4c1685 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs @@ -0,0 +1,159 @@ +use std::str::FromStr; + +use common::{cmd::Cmd, spinner::Spinner}; +use serde::Deserialize; +use xshell::{cmd, Shell}; + +use crate::messages::{MSG_INVALID_ARCH_ERR, MSG_NO_RELEASES_FOUND_ERR}; + +#[derive(Deserialize)] +struct GitHubRelease { + tag_name: String, + assets: Vec<GitHubAsset>, +} + +#[derive(Deserialize)] +struct GitHubAsset { + name: String, + browser_download_url: String, +} + +#[derive(Deserialize)] +struct SolcList { + builds: Vec<SolcBuild>, +} + +#[derive(Deserialize)] +struct SolcBuild { + path: String, + version: String, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Version { + pub version: String, + pub arch: Vec<Arch>, + pub url: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Copy)] +pub enum Arch { + LinuxAmd, + LinuxArm, + MacosAmd, + MacosArm, +} + +impl std::str::FromStr for Arch { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if s.contains("linux-amd64") { + Ok(Arch::LinuxAmd) + } else if s.contains("linux-arm64") { + Ok(Arch::LinuxArm) + } else if s.contains("macosx-amd64") { + Ok(Arch::MacosAmd) + } else if s.contains("macosx-arm64") { + Ok(Arch::MacosArm) + } else { + Err(anyhow::anyhow!(MSG_INVALID_ARCH_ERR)) + } + } +} + +fn get_compatible_archs(asset_name: &str) -> anyhow::Result<Vec<Arch>> { + if let Ok(arch) = Arch::from_str(asset_name) { + Ok(vec![arch]) + } else if asset_name.contains(".linux") { + Ok(vec![Arch::LinuxAmd, Arch::LinuxArm]) + } else if asset_name.contains(".darwin") { + Ok(vec![Arch::MacosAmd, Arch::MacosArm]) + } else { + Err(anyhow::anyhow!(MSG_INVALID_ARCH_ERR)) + } +} + +fn get_releases(shell: &Shell, repo: &str, arch: Arch) -> anyhow::Result<Vec<Version>> { + if repo == "ethereum/solc-bin" { + return get_solc_releases(shell, arch); + } + + let response: std::process::Output = Cmd::new(cmd!( + shell, + "curl https://api.github.com/repos/{repo}/releases" + )) + .run_with_output()?; + + let response = String::from_utf8(response.stdout)?; + let releases: Vec<GitHubRelease> = serde_json::from_str(&response)?; + + let mut versions = vec![]; + + for release in releases { + let version = release.tag_name; + for asset in release.assets { + let arch = match get_compatible_archs(&asset.name) { + Ok(arch) => arch, + Err(_) => continue, + }; + let url = asset.browser_download_url; + versions.push(Version { + version: version.clone(), + arch, + url, + }); + } + } + + Ok(versions) +} + +fn
get_solc_releases(shell: &Shell, arch: Arch) -> anyhow::Result> { + let (arch_str, compatible_archs) = match arch { + Arch::LinuxAmd => ("linux-amd64", vec![Arch::LinuxAmd, Arch::LinuxArm]), + Arch::LinuxArm => ("linux-amd64", vec![Arch::LinuxAmd, Arch::LinuxArm]), + Arch::MacosAmd => ("macosx-amd64", vec![Arch::MacosAmd, Arch::MacosArm]), + Arch::MacosArm => ("macosx-amd64", vec![Arch::MacosAmd, Arch::MacosArm]), + }; + + let response: std::process::Output = Cmd::new(cmd!( + shell, + "curl https://raw.githubusercontent.com/ethereum/solc-bin/gh-pages/{arch_str}/list.json" + )) + .run_with_output()?; + + let response = String::from_utf8(response.stdout)?; + let solc_list: SolcList = serde_json::from_str(&response)?; + + let mut versions = vec![]; + for build in solc_list.builds { + let path = build.path; + versions.push(Version { + version: build.version, + arch: compatible_archs.clone(), + url: format!("https://github.com/ethereum/solc-bin/raw/gh-pages/{arch_str}/{path}"), + }); + } + versions.reverse(); + Ok(versions) +} + +pub fn get_releases_with_arch( + shell: &Shell, + repo: &str, + arch: Arch, + message: &str, +) -> anyhow::Result> { + let spinner = Spinner::new(message); + let releases = get_releases(shell, repo, arch)?; + let releases = releases + .into_iter() + .filter(|r| r.arch.contains(&arch)) + .collect::>(); + if releases.is_empty() { + anyhow::bail!(MSG_NO_RELEASES_FOUND_ERR); + } + spinner.finish(); + Ok(releases) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs new file mode 100644 index 000000000000..5fd482ae5fff --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs @@ -0,0 +1,107 @@ +use std::path::{Path, PathBuf}; + +use common::{cmd::Cmd, logger, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use super::args::{init::InitContractVerifierArgs, releases::Version}; +use crate::messages::{msg_binary_already_exists, msg_downloading_binary_spinner}; + +pub(crate) async fn run(shell: &Shell, args: InitContractVerifierArgs) -> anyhow::Result<()> { + let args = args.fill_values_with_prompt(shell)?; + let ecosystem = EcosystemConfig::from_file(shell)?; + let link_to_code = ecosystem.link_to_code; + + download_binaries( + shell, + args.zksolc_releases, + get_zksolc_path, + &link_to_code, + "zksolc", + )?; + + download_binaries( + shell, + args.zkvyper_releases, + get_zkvyper_path, + &link_to_code, + "zkvyper", + )?; + + download_binaries( + shell, + args.solc_releases, + get_solc_path, + &link_to_code, + "solc", + )?; + + download_binaries( + shell, + args.vyper_releases, + get_vyper_path, + &link_to_code, + "vyper", + )?; + + Ok(()) +} + +fn download_binaries( + shell: &Shell, + releases: Vec, + get_path: fn(&Path, &str) -> PathBuf, + link_to_code: &Path, + name: &str, +) -> anyhow::Result<()> { + for release in releases { + download_binary( + shell, + &release.url, + &get_path(link_to_code, &release.version), + name, + &release.version, + )?; + } + Ok(()) +} + +fn download_binary( + shell: &Shell, + url: &str, + path: &Path, + name: &str, + version: &str, +) -> anyhow::Result<()> { + let binary_path = path.join(name); + if shell.path_exists(binary_path.clone()) { + logger::info(msg_binary_already_exists(name, version)); + return Ok(()); + } + + let spinner = Spinner::new(&msg_downloading_binary_spinner(name, version)); + Cmd::new(cmd!(shell, "mkdir -p {path}")).run()?; + Cmd::new(cmd!(shell, "wget {url} 
-O {binary_path}")).run()?; + Cmd::new(cmd!(shell, "chmod +x {binary_path}")).run()?; + spinner.finish(); + + Ok(()) +} + +fn get_zksolc_path(link_to_code: &Path, version: &str) -> PathBuf { + link_to_code.join("etc/zksolc-bin/").join(version) +} + +fn get_zkvyper_path(link_to_code: &Path, version: &str) -> PathBuf { + link_to_code.join("etc/zkvyper-bin/").join(version) +} + +fn get_vyper_path(link_to_code: &Path, version: &str) -> PathBuf { + link_to_code + .join("etc/vyper-bin/") + .join(version.replace('v', "")) +} + +fn get_solc_path(link_to_code: &Path, version: &str) -> PathBuf { + link_to_code.join("etc/solc-bin/").join(version) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/mod.rs new file mode 100644 index 000000000000..78bdc5fae7ec --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/mod.rs @@ -0,0 +1,22 @@ +use args::init::InitContractVerifierArgs; +use clap::Subcommand; +use xshell::Shell; + +pub mod args; +pub mod init; +pub mod run; + +#[derive(Subcommand, Debug)] +pub enum ContractVerifierCommands { + /// Run contract verifier + Run, + /// Download required binaries for contract verifier + Init(InitContractVerifierArgs), +} + +pub(crate) async fn run(shell: &Shell, args: ContractVerifierCommands) -> anyhow::Result<()> { + match args { + ContractVerifierCommands::Run => run::run(shell).await, + ContractVerifierCommands::Init(args) => init::run(shell, args).await, + } +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs new file mode 100644 index 000000000000..1ae06c810ba1 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs @@ -0,0 +1,29 @@ +use anyhow::Context; +use common::{cmd::Cmd, logger}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use crate::messages::{ + MSG_CHAIN_NOT_FOUND_ERR, MSG_FAILED_TO_RUN_CONTRACT_VERIFIER_ERR, MSG_RUNNING_CONTRACT_VERIFIER, +}; + +pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = ecosystem + .load_chain(Some(ecosystem.default_chain.clone())) + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + + let config_path = chain.path_to_general_config(); + let secrets_path = chain.path_to_secrets_config(); + + let _dir_guard = shell.push_dir(&chain.link_to_code); + + logger::info(MSG_RUNNING_CONTRACT_VERIFIER); + + let mut cmd = Cmd::new(cmd!( + shell, + "cargo run --bin zksync_contract_verifier -- --config-path={config_path} --secrets-path={secrets_path}" + )); + cmd = cmd.with_force_run(); + cmd.run().context(MSG_FAILED_TO_RUN_CONTRACT_VERIFIER_ERR) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs index 77ee3d42966a..d3d5fe129678 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs @@ -1,19 +1,23 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; +use anyhow::bail; use clap::Parser; -use common::{slugify, Prompt, PromptConfirm, PromptSelect}; +use common::{cmd::Cmd, logger, Prompt, PromptConfirm, PromptSelect}; use serde::{Deserialize, Serialize}; +use slugify_rs::slugify; use strum::IntoEnumIterator; use strum_macros::EnumIter; use types::{L1Network, WalletCreation}; +use xshell::{cmd, 
Shell}; use crate::{ commands::chain::{args::create::ChainCreateArgs, ChainCreateArgsFinal}, messages::{ + msg_path_to_zksync_does_not_exist_err, MSG_CONFIRM_STILL_USE_FOLDER, MSG_ECOSYSTEM_NAME_PROMPT, MSG_L1_NETWORK_HELP, MSG_L1_NETWORK_PROMPT, MSG_LINK_TO_CODE_HELP, MSG_LINK_TO_CODE_PROMPT, MSG_LINK_TO_CODE_SELECTION_CLONE, - MSG_LINK_TO_CODE_SELECTION_PATH, MSG_REPOSITORY_ORIGIN_PROMPT, MSG_START_CONTAINERS_HELP, - MSG_START_CONTAINERS_PROMPT, + MSG_LINK_TO_CODE_SELECTION_PATH, MSG_NOT_MAIN_REPO_OR_FORK_ERR, + MSG_REPOSITORY_ORIGIN_PROMPT, MSG_START_CONTAINERS_HELP, MSG_START_CONTAINERS_PROMPT, }, }; @@ -33,18 +37,27 @@ pub struct EcosystemCreateArgs { } impl EcosystemCreateArgs { - pub fn fill_values_with_prompt(mut self) -> EcosystemCreateArgsFinal { + pub fn fill_values_with_prompt(mut self, shell: &Shell) -> EcosystemCreateArgsFinal { let mut ecosystem_name = self .ecosystem_name .unwrap_or_else(|| Prompt::new(MSG_ECOSYSTEM_NAME_PROMPT).ask()); - ecosystem_name = slugify(&ecosystem_name); + ecosystem_name = slugify!(&ecosystem_name, separator = "_"); let link_to_code = self.link_to_code.unwrap_or_else(|| { let link_to_code_selection = PromptSelect::new(MSG_REPOSITORY_ORIGIN_PROMPT, LinkToCodeSelection::iter()).ask(); match link_to_code_selection { LinkToCodeSelection::Clone => "".to_string(), - LinkToCodeSelection::Path => Prompt::new(MSG_LINK_TO_CODE_PROMPT).ask(), + LinkToCodeSelection::Path => { + let mut path: String = Prompt::new(MSG_LINK_TO_CODE_PROMPT).ask(); + if let Err(err) = check_link_to_code(shell, &path) { + logger::warn(err); + if !PromptConfirm::new(MSG_CONFIRM_STILL_USE_FOLDER).ask() { + path = pick_new_link_to_code(shell); + } + } + path + } } }); @@ -104,3 +117,40 @@ impl std::fmt::Display for LinkToCodeSelection { } } } + +fn check_link_to_code(shell: &Shell, path: &str) -> anyhow::Result<()> { + let path = Path::new(path); + if !shell.path_exists(path) { + bail!(msg_path_to_zksync_does_not_exist_err( + path.to_str().unwrap() + )); + } + + let _guard = shell.push_dir(path); + let out = String::from_utf8( + Cmd::new(cmd!(shell, "git remote -v")) + .run_with_output()? 
+            .stdout,
+    )?;
+
+    if !out.contains("matter-labs/zksync-era") {
+        bail!(MSG_NOT_MAIN_REPO_OR_FORK_ERR);
+    }
+
+    Ok(())
+}
+
+fn pick_new_link_to_code(shell: &Shell) -> String {
+    let link_to_code: String = Prompt::new(MSG_LINK_TO_CODE_PROMPT).ask();
+    match check_link_to_code(shell, &link_to_code) {
+        Ok(_) => link_to_code,
+        Err(err) => {
+            logger::warn(err);
+            if !PromptConfirm::new(MSG_CONFIRM_STILL_USE_FOLDER).ask() {
+                pick_new_link_to_code(shell)
+            } else {
+                link_to_code
+            }
+        }
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs
index 075435cf86f6..3e5e7b06dcfb 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs
@@ -11,7 +11,7 @@ use crate::{
     defaults::LOCAL_RPC_URL,
     messages::{
         MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEPLOY_PAYMASTER_PROMPT,
-        MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR,
+        MSG_DEV_ARG_HELP, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR,
         MSG_L1_RPC_URL_PROMPT,
     },
 };
@@ -29,15 +29,22 @@ pub struct EcosystemArgs {
 }
 
 impl EcosystemArgs {
-    pub fn fill_values_with_prompt(self, l1_network: L1Network) -> EcosystemArgsFinal {
+    pub fn fill_values_with_prompt(self, l1_network: L1Network, dev: bool) -> EcosystemArgsFinal {
         let deploy_ecosystem = self.deploy_ecosystem.unwrap_or_else(|| {
-            PromptConfirm::new(MSG_DEPLOY_ECOSYSTEM_PROMPT)
-                .default(true)
-                .ask()
+            if dev {
+                true
+            } else {
+                PromptConfirm::new(MSG_DEPLOY_ECOSYSTEM_PROMPT)
+                    .default(true)
+                    .ask()
+            }
         });
 
         let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| {
             let mut prompt = Prompt::new(MSG_L1_RPC_URL_PROMPT);
+            if dev {
+                return LOCAL_RPC_URL.to_string();
+            }
             if l1_network == L1Network::Localhost {
                 prompt = prompt.default(LOCAL_RPC_URL);
             }
@@ -81,27 +88,35 @@ pub struct EcosystemInitArgs {
     #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)]
     #[serde(flatten)]
     pub genesis_args: GenesisArgs,
+    #[clap(long, help = MSG_DEV_ARG_HELP)]
+    pub dev: bool,
 }
 
 impl EcosystemInitArgs {
     pub fn fill_values_with_prompt(self, l1_network: L1Network) -> EcosystemInitArgsFinal {
-        let deploy_paymaster = self.deploy_paymaster.unwrap_or_else(|| {
-            PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT)
-                .default(true)
-                .ask()
-        });
-        let deploy_erc20 = self.deploy_erc20.unwrap_or_else(|| {
-            PromptConfirm::new(MSG_DEPLOY_ERC20_PROMPT)
-                .default(true)
-                .ask()
-        });
-        let ecosystem = self.ecosystem.fill_values_with_prompt(l1_network);
+        let (deploy_paymaster, deploy_erc20) = if self.dev {
+            (true, true)
+        } else {
+            let deploy_paymaster = self.deploy_paymaster.unwrap_or_else(|| {
+                PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT)
+                    .default(true)
+                    .ask()
+            });
+            let deploy_erc20 = self.deploy_erc20.unwrap_or_else(|| {
+                PromptConfirm::new(MSG_DEPLOY_ERC20_PROMPT)
+                    .default(true)
+                    .ask()
+            });
+            (deploy_paymaster, deploy_erc20)
+        };
+        let ecosystem = self.ecosystem.fill_values_with_prompt(l1_network, self.dev);
 
         EcosystemInitArgsFinal {
             deploy_paymaster,
             deploy_erc20,
             ecosystem,
             forge_args: self.forge_args.clone(),
+            dev: self.dev,
         }
     }
 }
@@ -112,4 +127,5 @@ pub struct EcosystemInitArgsFinal {
     pub deploy_erc20: bool,
     pub ecosystem: EcosystemArgsFinal,
     pub forge_args: ForgeScriptArgs,
+    pub dev: bool,
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs
index 4daab36c56b8..30dffad035ab 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs
@@ -1,16 +1,13 @@
-use std::{
-    path::{Path, PathBuf},
-    str::FromStr,
-};
+use std::{path::PathBuf, str::FromStr};
 
 use anyhow::bail;
-use common::{cmd::Cmd, logger, spinner::Spinner};
+use common::{git, logger, spinner::Spinner};
 use config::{
     create_local_configs_dir, create_wallets, get_default_era_chain_id,
     traits::SaveConfigWithBasePath, EcosystemConfig, EcosystemConfigFromFileError,
     ZKSYNC_ERA_GIT_REPO,
 };
-use xshell::{cmd, Shell};
+use xshell::Shell;
 
 use crate::{
     commands::{
@@ -22,7 +19,7 @@ use crate::{
         },
     },
     messages::{
-        MSG_CLONING_ERA_REPO_SPINNER, MSG_CREATED_ECOSYSTEM, MSG_CREATING_DEFAULT_CHAIN_SPINNER,
+        msg_created_ecosystem, MSG_CLONING_ERA_REPO_SPINNER, MSG_CREATING_DEFAULT_CHAIN_SPINNER,
         MSG_CREATING_ECOSYSTEM, MSG_CREATING_INITIAL_CONFIGURATIONS_SPINNER,
         MSG_ECOSYSTEM_ALREADY_EXISTS_ERR, MSG_ECOSYSTEM_CONFIG_INVALID_ERR, MSG_SELECTED_CONFIG,
         MSG_STARTING_CONTAINERS_SPINNER,
@@ -35,14 +32,14 @@ pub fn run(args: EcosystemCreateArgs, shell: &Shell) -> anyhow::Result<()> {
         Err(EcosystemConfigFromFileError::InvalidConfig { .. }) => {
             bail!(MSG_ECOSYSTEM_CONFIG_INVALID_ERR)
         }
-        Err(EcosystemConfigFromFileError::NotExists) => create(args, shell)?,
+        Err(EcosystemConfigFromFileError::NotExists { .. }) => create(args, shell)?,
     };
 
     Ok(())
 }
 
 fn create(args: EcosystemCreateArgs, shell: &Shell) -> anyhow::Result<()> {
-    let args = args.fill_values_with_prompt();
+    let args = args.fill_values_with_prompt(shell);
 
     logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&args));
     logger::info(MSG_CREATING_ECOSYSTEM);
@@ -55,12 +52,17 @@ fn create(args: EcosystemCreateArgs, shell: &Shell) -> anyhow::Result<()> {
 
     let link_to_code = if args.link_to_code.is_empty() {
         let spinner = Spinner::new(MSG_CLONING_ERA_REPO_SPINNER);
-        let link_to_code = clone_era_repo(shell)?;
+        let link_to_code = git::clone(
+            shell,
+            shell.current_dir(),
+            ZKSYNC_ERA_GIT_REPO,
+            "zksync-era",
+        )?;
         spinner.finish();
         link_to_code
     } else {
         let path = PathBuf::from_str(&args.link_to_code)?;
-        update_submodules_recursive(shell, &path)?;
+        git::submodule_update(shell, path.clone())?;
         path
     };
 
@@ -76,6 +78,7 @@ fn create(args: EcosystemCreateArgs, shell: &Shell) -> anyhow::Result<()> {
         name: ecosystem_name.clone(),
         l1_network: args.l1_network,
         link_to_code: link_to_code.clone(),
+        bellman_cuda_dir: None,
         chains: chains_path.clone(),
         config: configs_path,
         era_chain_id: get_default_era_chain_id(),
@@ -108,26 +111,6 @@ fn create(args: EcosystemCreateArgs, shell: &Shell) -> anyhow::Result<()> {
         spinner.finish();
     }
 
-    logger::outro(MSG_CREATED_ECOSYSTEM);
-    Ok(())
-}
-
-fn clone_era_repo(shell: &Shell) -> anyhow::Result<PathBuf> {
-    Cmd::new(cmd!(
-        shell,
-        "git clone --recurse-submodules {ZKSYNC_ERA_GIT_REPO}"
-    ))
-    .run()?;
-    Ok(shell.current_dir().join("zksync-era"))
-}
-
-fn update_submodules_recursive(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> {
-    let _dir_guard = shell.push_dir(link_to_code);
-    Cmd::new(cmd!(
-        shell,
-        "git submodule update --init --recursive"
-    ))
-    .run()?;
+    logger::outro(msg_created_ecosystem(ecosystem_name));
     Ok(())
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs
index fecda40c7760..4fa6c8c47d8d 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs
@@ -8,7 +8,7 @@ use common::{
     cmd::Cmd,
     config::global_config,
     forge::{Forge, ForgeScriptArgs},
-    logger,
+    git, logger,
     spinner::Spinner,
     Prompt,
 };
@@ -41,7 +41,6 @@ use crate::{
         },
     },
     consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS,
-    forge_utils::{check_the_balance, fill_forge_private_key},
    messages::{
         msg_ecosystem_initialized, msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED,
         MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20,
@@ -49,17 +48,23 @@ use crate::{
         MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR, MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT,
         MSG_INITIALIZING_ECOSYSTEM, MSG_INTALLING_DEPS_SPINNER,
     },
+    utils::forge::{check_the_balance, fill_forge_private_key},
 };
 
 pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    git::submodule_update(shell, ecosystem_config.link_to_code.clone())?;
+
     let initial_deployment_config = match ecosystem_config.get_initial_deployment_config() {
         Ok(config) => config,
         Err(_) => create_initial_deployments_config(shell, &ecosystem_config.config)?,
     };
 
-    let genesis_args = args.genesis_args.clone();
+    let mut genesis_args = args.genesis_args.clone();
+    if args.dev {
+        genesis_args.use_default = true;
+    }
     let mut final_ecosystem_args = args.fill_values_with_prompt(ecosystem_config.l1_network);
 
     logger::info(MSG_INITIALIZING_ECOSYSTEM);
@@ -109,13 +114,6 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> {
             l1_rpc_url: final_ecosystem_args.ecosystem.l1_rpc_url.clone(),
         };
 
-        distribute_eth(
-            &ecosystem_config,
-            &chain_config,
-            final_ecosystem_args.ecosystem.l1_rpc_url.clone(),
-        )
-        .await?;
-
         chain::init::init(
             &mut chain_init_args,
             shell,
@@ -195,8 +193,17 @@ async fn deploy_erc20(
     l1_rpc_url: String,
 ) -> anyhow::Result<ContractsConfig> {
     let deploy_config_path = DEPLOY_ERC20_SCRIPT_PARAMS.input(&ecosystem_config.link_to_code);
-    DeployErc20Config::new(erc20_deployment_config, contracts_config)
-        .save(shell, deploy_config_path)?;
+    let wallets = ecosystem_config.get_wallets()?;
+    DeployErc20Config::new(
+        erc20_deployment_config,
+        contracts_config,
+        vec![
+            wallets.governor.address,
+            wallets.operator.address,
+            wallets.blob_operator.address,
+        ],
+    )
+    .save(shell, deploy_config_path)?;
 
     let mut forge = Forge::new(&ecosystem_config.path_to_foundry())
         .script(&DEPLOY_ERC20_SCRIPT_PARAMS.script(), forge_args.clone())
@@ -349,10 +356,10 @@ async fn deploy_ecosystem_inner(
 
 fn install_yarn_dependencies(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> {
     let _dir_guard = shell.push_dir(link_to_code);
-    Cmd::new(cmd!(shell, "yarn install")).run()
+    Ok(Cmd::new(cmd!(shell, "yarn install")).run()?)
 }
 
 fn build_system_contracts(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> {
     let _dir_guard = shell.push_dir(link_to_code.join("contracts"));
-    Cmd::new(cmd!(shell, "yarn sc build")).run()
+    Ok(Cmd::new(cmd!(shell, "yarn sc build")).run()?)
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs
index e2db65b213f8..1e4b4f9bd2af 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs
@@ -9,7 +9,7 @@ mod args;
 mod change_default;
 mod create;
 pub mod create_configs;
-mod init;
+pub(crate) mod init;
 
 #[derive(Subcommand, Debug)]
 #[allow(clippy::large_enum_variant)]
@@ -21,6 +21,7 @@ pub enum EcosystemCommands {
     /// deploying necessary contracts and performing on-chain operations
     Init(EcosystemInitArgs),
     /// Change the default chain
+    #[command(alias = "cd")]
     ChangeDefaultChain(ChangeDefaultChain),
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/args/mod.rs
new file mode 100644
index 000000000000..ebc7855c2b58
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/args/mod.rs
@@ -0,0 +1,2 @@
+pub mod prepare_configs;
+pub mod run;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/prepare_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/args/prepare_configs.rs
new file mode 100644
index 000000000000..3f91380b7bdf
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/args/prepare_configs.rs
@@ -0,0 +1,69 @@
+use clap::Parser;
+use common::{db::DatabaseConfig, Prompt};
+use config::ChainConfig;
+use serde::{Deserialize, Serialize};
+use slugify_rs::slugify;
+use url::Url;
+
+use crate::{
+    defaults::{generate_external_node_db_name, DATABASE_SERVER_URL, LOCAL_RPC_URL},
+    messages::{
+        msg_external_node_db_name_prompt, msg_external_node_db_url_prompt, MSG_L1_RPC_URL_PROMPT,
+        MSG_USE_DEFAULT_DATABASES_HELP,
+    },
+};
+
+#[derive(Debug, Clone, Serialize, Deserialize, Parser, Default)]
+pub struct PrepareConfigArgs {
+    #[clap(long)]
+    pub db_url: Option<Url>,
+    #[clap(long)]
+    pub db_name: Option<String>,
+    #[clap(long)]
+    pub l1_rpc_url: Option<String>,
+    #[clap(long, short, help = MSG_USE_DEFAULT_DATABASES_HELP)]
+    pub use_default: bool,
+}
+
+impl PrepareConfigArgs {
+    pub fn fill_values_with_prompt(self, config: &ChainConfig) -> PrepareConfigFinal {
+        let db_name = generate_external_node_db_name(config);
+        let chain_name = config.name.clone();
+        if self.use_default {
+            PrepareConfigFinal {
+                db: DatabaseConfig::new(DATABASE_SERVER_URL.clone(), db_name),
+                l1_rpc_url: LOCAL_RPC_URL.to_string(),
+            }
+        } else {
+            let db_url = self.db_url.unwrap_or_else(|| {
+                Prompt::new(&msg_external_node_db_url_prompt(&chain_name))
+                    .default(DATABASE_SERVER_URL.as_str())
+                    .ask()
+            });
+            let db_name = slugify!(
+                &self.db_name.unwrap_or_else(|| {
+                    Prompt::new(&msg_external_node_db_name_prompt(&chain_name))
+                        .default(&db_name)
+                        .ask()
+                }),
+                separator = "_"
+            );
+            let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| {
+                Prompt::new(MSG_L1_RPC_URL_PROMPT)
+                    .default(LOCAL_RPC_URL)
+                    .ask()
+            });
+
+            PrepareConfigFinal {
+                db: DatabaseConfig::new(db_url, db_name),
+                l1_rpc_url,
+            }
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct PrepareConfigFinal {
+    pub db: DatabaseConfig,
+    pub l1_rpc_url: String,
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs
new file mode 100644
index 000000000000..1bc0c06728d7
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs
@@ -0,0 +1,15 @@
+use clap::Parser;
+use serde::{Deserialize, Serialize};
+
+use crate::messages::{MSG_SERVER_ADDITIONAL_ARGS_HELP, MSG_SERVER_COMPONENTS_HELP};
+
+#[derive(Debug, Serialize, Deserialize, Parser)]
+pub struct RunExternalNodeArgs {
+    #[clap(long)]
+    pub reinit: bool,
+    #[clap(long, help = MSG_SERVER_COMPONENTS_HELP)]
+    pub components: Option<Vec<String>>,
+    #[clap(long, short)]
+    #[arg(trailing_var_arg = true, allow_hyphen_values = true, hide = false, help = MSG_SERVER_ADDITIONAL_ARGS_HELP)]
+    pub additional_args: Vec<String>,
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs
new file mode 100644
index 000000000000..c6101e88739c
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs
@@ -0,0 +1,53 @@
+use anyhow::Context;
+use common::{
+    config::global_config,
+    db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig},
+    spinner::Spinner,
+};
+use config::{traits::ReadConfigWithBasePath, ChainConfig, EcosystemConfig, SecretsConfig};
+use xshell::Shell;
+
+use crate::{
+    consts::SERVER_MIGRATIONS,
+    messages::{
+        MSG_CHAIN_NOT_INITIALIZED, MSG_EXTERNAL_NODE_CONFIG_NOT_INITIALIZED,
+        MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_INITIALIZING_DATABASES_SPINNER,
+    },
+    utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption},
+};
+
+pub async fn run(shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    let chain = global_config().chain_name.clone();
+    let chain_config = ecosystem_config
+        .load_chain(chain)
+        .context(MSG_CHAIN_NOT_INITIALIZED)?;
+
+    init(shell, &chain_config).await
+}
+
+pub async fn init(shell: &Shell, chain_config: &ChainConfig) -> anyhow::Result<()> {
+    let spin = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER);
+    let secrets = SecretsConfig::read_with_base_path(
+        shell,
+        chain_config
+            .external_node_config_path
+            .clone()
+            .context(MSG_EXTERNAL_NODE_CONFIG_NOT_INITIALIZED)?,
+    )?;
+    let db_config = DatabaseConfig::from_url(secrets.database.server_url)?;
+    drop_db_if_exists(&db_config)
+        .await
+        .context(MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR)?;
+    init_db(&db_config).await?;
+    recreate_rocksdb_dirs(
+        shell,
+        &chain_config.rocks_db_path,
+        RocksDBDirOption::ExternalNode,
+    )?;
+    let path_to_server_migration = chain_config.link_to_code.join(SERVER_MIGRATIONS);
+    migrate_db(shell, path_to_server_migration, &db_config.full_url()).await?;
+    spin.finish();
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/mod.rs
new file mode 100644
index 000000000000..06e422de08b8
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/mod.rs
@@ -0,0 +1,24 @@
+use args::{prepare_configs::PrepareConfigArgs, run::RunExternalNodeArgs};
+use clap::Parser;
+use serde::{Deserialize, Serialize};
+use xshell::Shell;
+
+mod args;
+mod init;
+mod prepare_configs;
+mod run;
+
+#[derive(Debug, Serialize, Deserialize, Parser)]
+pub enum ExternalNodeCommands {
+    Configs(PrepareConfigArgs),
+    Init,
+    Run(RunExternalNodeArgs),
+}
+
+pub async fn run(shell: &Shell, commands: ExternalNodeCommands) -> anyhow::Result<()> {
+    match commands {
+        ExternalNodeCommands::Configs(args) => prepare_configs::run(shell, args),
+        ExternalNodeCommands::Init => init::run(shell).await,
+        ExternalNodeCommands::Run(args) => run::run(shell, args).await,
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs
new file mode 100644
index 000000000000..09e9d1b460c4
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs
@@ -0,0 +1,79 @@
+use std::path::Path;
+
+use anyhow::Context;
+use common::{config::global_config, logger};
+use config::{
+    external_node::ENConfig, traits::SaveConfigWithBasePath, ChainConfig, DatabaseSecrets,
+    EcosystemConfig, L1Secret, SecretsConfig,
+};
+use xshell::Shell;
+
+use crate::{
+    commands::external_node::args::prepare_configs::{PrepareConfigArgs, PrepareConfigFinal},
+    messages::{
+        msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, MSG_PREPARING_EN_CONFIGS,
+    },
+    utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption},
+};
+
+pub fn run(shell: &Shell, args: PrepareConfigArgs) -> anyhow::Result<()> {
+    logger::info(MSG_PREPARING_EN_CONFIGS);
+    let chain_name = global_config().chain_name.clone();
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let mut chain_config = ecosystem_config
+        .load_chain(chain_name)
+        .context(MSG_CHAIN_NOT_INITIALIZED)?;
+
+    let args = args.fill_values_with_prompt(&chain_config);
+    let external_node_config_path = chain_config
+        .external_node_config_path
+        .unwrap_or_else(|| chain_config.configs.join("external_node"));
+    shell.create_dir(&external_node_config_path)?;
+    chain_config.external_node_config_path = Some(external_node_config_path.clone());
+    prepare_configs(shell, &chain_config, &external_node_config_path, args)?;
+    let chain_path = ecosystem_config.chains.join(&chain_config.name);
+    chain_config.save_with_base_path(shell, chain_path)?;
+    logger::info(msg_preparing_en_config_is_done(&external_node_config_path));
+    Ok(())
+}
+
+fn prepare_configs(
+    shell: &Shell,
+    config: &ChainConfig,
+    en_configs_path: &Path,
+    args: PrepareConfigFinal,
+) -> anyhow::Result<()> {
+    let genesis = config.get_genesis_config()?;
+    let general = config.get_general_config()?;
+    let en_config = ENConfig {
+        l2_chain_id: genesis.l2_chain_id,
+        l1_chain_id: genesis.l1_chain_id,
+        l1_batch_commit_data_generator_mode: genesis
+            .l1_batch_commit_data_generator_mode
+            .unwrap_or_default(),
+        main_node_url: general.api.web3_json_rpc.http_url.clone(),
+        main_node_rate_limit_rps: None,
+    };
+    let mut general_en = general.clone();
+    general_en.update_ports(&general.ports_config().next_empty_ports_config())?;
+    let secrets = SecretsConfig {
+        database: DatabaseSecrets {
+            server_url: args.db.full_url(),
+            prover_url: None,
+            other: Default::default(),
+        },
+        l1: L1Secret {
+            l1_rpc_url: args.l1_rpc_url.clone(),
+            other: Default::default(),
+        },
+        other: Default::default(),
+    };
+    secrets.save_with_base_path(shell, en_configs_path)?;
+    let dirs = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::ExternalNode)?;
+    general_en.set_rocks_db_config(dirs)?;
+
+    general_en.save_with_base_path(shell, en_configs_path)?;
+    en_config.save_with_base_path(shell, en_configs_path)?;
+
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs
new file mode 100644
index 000000000000..9d3da4663859
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs
@@ -0,0 +1,37 @@
+use anyhow::Context;
+use common::{config::global_config, logger};
+use config::{ChainConfig, EcosystemConfig};
+use xshell::Shell;
+
+use crate::{
+    commands::external_node::{args::run::RunExternalNodeArgs, init},
+    external_node::RunExternalNode,
+    messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_STARTING_EN},
+};
+
+pub async fn run(shell: &Shell, args: RunExternalNodeArgs) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    let chain = global_config().chain_name.clone();
+    let chain_config = ecosystem_config
+        .load_chain(chain)
+        .context(MSG_CHAIN_NOT_INITIALIZED)?;
+
+    logger::info(MSG_STARTING_EN);
+
+    run_external_node(args, &chain_config, shell).await?;
+
+    Ok(())
+}
+
+async fn run_external_node(
+    args: RunExternalNodeArgs,
+    chain_config: &ChainConfig,
+    shell: &Shell,
+) -> anyhow::Result<()> {
+    if args.reinit {
+        init::init(shell, chain_config).await?
+    }
+    let server = RunExternalNode::new(args.components.clone(), chain_config)?;
+    server.run(shell, args.additional_args.clone())
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/mod.rs
index ccdf5b082caa..5cba51265981 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/mod.rs
@@ -1,6 +1,8 @@
 pub mod args;
 pub mod chain;
 pub mod containers;
+pub mod contract_verifier;
 pub mod ecosystem;
+pub mod external_node;
 pub mod prover;
 pub mod server;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs
new file mode 100644
index 000000000000..c398b1852c61
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs
@@ -0,0 +1,410 @@
+use clap::{Parser, ValueEnum};
+use common::{logger, Prompt, PromptConfirm, PromptSelect};
+use serde::{Deserialize, Serialize};
+use strum::IntoEnumIterator;
+use strum_macros::EnumIter;
+use xshell::Shell;
+
+use super::init_bellman_cuda::InitBellmanCudaArgs;
+use crate::{
+    commands::prover::gcs::get_project_ids,
+    consts::{DEFAULT_CREDENTIALS_FILE, DEFAULT_PROOF_STORE_DIR},
+    messages::{
+        MSG_CREATE_GCS_BUCKET_LOCATION_PROMPT, MSG_CREATE_GCS_BUCKET_NAME_PROMTP,
+        MSG_CREATE_GCS_BUCKET_PROJECT_ID_NO_PROJECTS_PROMPT,
+        MSG_CREATE_GCS_BUCKET_PROJECT_ID_PROMPT, MSG_CREATE_GCS_BUCKET_PROMPT,
+        MSG_DOWNLOAD_SETUP_KEY_PROMPT, MSG_GETTING_PROOF_STORE_CONFIG,
+        MSG_GETTING_PUBLIC_STORE_CONFIG, MSG_PROOF_STORE_CONFIG_PROMPT, MSG_PROOF_STORE_DIR_PROMPT,
+        MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_ERR, MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_PROMPT,
+        MSG_PROOF_STORE_GCS_CREDENTIALS_FILE_PROMPT, MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT,
+        MSG_SETUP_KEY_PATH_PROMPT,
+    },
+};
+
+#[derive(Debug, Clone, Serialize, Deserialize, Parser, Default)]
+pub struct ProverInitArgs {
+    // Proof store object
+    #[clap(long)]
+    pub proof_store_dir: Option<String>,
+    #[clap(flatten)]
+    #[serde(flatten)]
+    pub proof_store_gcs_config: ProofStorageGCSTmp,
+    #[clap(flatten)]
+    #[serde(flatten)]
+    pub create_gcs_bucket_config: ProofStorageGCSCreateBucketTmp,
+
+    // Public store object
+    #[clap(long)]
+    pub shall_save_to_public_bucket: Option<bool>,
+    #[clap(long)]
+    pub public_store_dir: Option<String>,
+    #[clap(flatten)]
+    #[serde(flatten)]
+    pub public_store_gcs_config: PublicStorageGCSTmp,
+    #[clap(flatten)]
+    #[serde(flatten)]
+    pub public_create_gcs_bucket_config: PublicStorageGCSCreateBucketTmp,
+
+    // Bellman cuda
+    #[clap(flatten)]
+    #[serde(flatten)]
+    pub bellman_cuda_config: InitBellmanCudaArgs,
+
+    #[clap(flatten)]
+    #[serde(flatten)]
+    pub setup_key_config: SetupKeyConfigTmp,
+}
+
+#[derive(Debug, Clone, ValueEnum, EnumIter, strum_macros::Display, PartialEq, Eq)]
+#[allow(clippy::upper_case_acronyms)]
+enum ProofStoreConfig {
+    Local,
+    GCS,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, Parser, Default)]
+pub struct ProofStorageGCSTmp {
+    #[clap(long)]
+    pub bucket_base_url: Option<String>,
+    #[clap(long)]
+    pub credentials_file: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Parser, Default)]
+pub struct ProofStorageGCSCreateBucketTmp {
+    #[clap(long)]
+    pub bucket_name: Option<String>,
+    #[clap(long)]
+    pub location: Option<String>,
+    #[clap(long)]
+    pub project_id: Option<String>,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, Parser, Default)]
+pub struct PublicStorageGCSTmp {
+    #[clap(long)]
+    pub public_bucket_base_url: Option<String>,
+    #[clap(long)]
+    pub public_credentials_file: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Parser, Default)]
+pub struct PublicStorageGCSCreateBucketTmp {
+    #[clap(long)]
+    pub public_bucket_name: Option<String>,
+    #[clap(long)]
+    pub public_location: Option<String>,
+    #[clap(long)]
+    pub public_project_id: Option<String>,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, Parser, Default)]
+pub struct SetupKeyConfigTmp {
+    #[clap(long)]
+    pub download_key: Option<bool>,
+    #[clap(long)]
+    pub setup_key_path: Option<String>,
+}
+
+#[derive(Debug, Clone)]
+pub struct ProofStorageFileBacked {
+    pub proof_store_dir: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct ProofStorageGCS {
+    pub bucket_base_url: String,
+    pub credentials_file: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct ProofStorageGCSCreateBucket {
+    pub bucket_name: String,
+    pub location: String,
+    pub project_id: String,
+    pub credentials_file: String,
+}
+
+#[derive(Debug, Clone)]
+#[allow(clippy::upper_case_acronyms)]
+pub enum ProofStorageConfig {
+    FileBacked(ProofStorageFileBacked),
+    GCS(ProofStorageGCS),
+    GCSCreateBucket(ProofStorageGCSCreateBucket),
+}
+
+#[derive(Debug, Clone)]
+pub struct SetupKeyConfig {
+    pub download_key: bool,
+    pub setup_key_path: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct ProverInitArgsFinal {
+    pub proof_store: ProofStorageConfig,
+    pub public_store: Option<ProofStorageConfig>,
+    pub setup_key_config: SetupKeyConfig,
+    pub bellman_cuda_config: InitBellmanCudaArgs,
+}
+
+impl ProverInitArgs {
+    pub(crate) fn fill_values_with_prompt(
+        &self,
+        shell: &Shell,
+        setup_key_path: &str,
+    ) -> anyhow::Result<ProverInitArgsFinal> {
+        let proof_store = self.fill_proof_storage_values_with_prompt(shell)?;
+        let public_store = self.fill_public_storage_values_with_prompt(shell)?;
+        let setup_key_config = self.fill_setup_key_values_with_prompt(setup_key_path);
+        let bellman_cuda_config = self.fill_bellman_cuda_values_with_prompt()?;
+        Ok(ProverInitArgsFinal {
+            proof_store,
+            public_store,
+            setup_key_config,
+            bellman_cuda_config,
+        })
+    }
+
+    fn fill_proof_storage_values_with_prompt(
+        &self,
+        shell: &Shell,
+    ) -> anyhow::Result<ProofStorageConfig> {
+        logger::info(MSG_GETTING_PROOF_STORE_CONFIG);
+
+        if self.proof_store_dir.is_some() {
+            return Ok(self.handle_file_backed_config(self.proof_store_dir.clone()));
+        }
+
+        if self.partial_gcs_config_provided(
+            self.proof_store_gcs_config.bucket_base_url.clone(),
+            self.proof_store_gcs_config.credentials_file.clone(),
+        ) {
+            return Ok(self.ask_gcs_config(
+                self.proof_store_gcs_config.bucket_base_url.clone(),
+                self.proof_store_gcs_config.credentials_file.clone(),
+            ));
+        }
+
+        if self.partial_create_gcs_bucket_config_provided(
+            self.create_gcs_bucket_config.bucket_name.clone(),
+            self.create_gcs_bucket_config.location.clone(),
+            self.create_gcs_bucket_config.project_id.clone(),
+        ) {
+            let project_ids = get_project_ids(shell)?;
+            return Ok(self.handle_create_gcs_bucket(
+                project_ids,
+                self.create_gcs_bucket_config.project_id.clone(),
+                self.create_gcs_bucket_config.bucket_name.clone(),
+                self.create_gcs_bucket_config.location.clone(),
+                self.proof_store_gcs_config.credentials_file.clone(),
+            ));
+        }
+
+        match PromptSelect::new(MSG_PROOF_STORE_CONFIG_PROMPT, ProofStoreConfig::iter()).ask() {
+            ProofStoreConfig::Local => {
+                Ok(self.handle_file_backed_config(self.proof_store_dir.clone()))
+            }
+            ProofStoreConfig::GCS => {
+                let project_ids = get_project_ids(shell)?;
+                Ok(self.handle_gcs_config(
+                    project_ids,
+                    self.proof_store_gcs_config.bucket_base_url.clone(),
+                    self.proof_store_gcs_config.credentials_file.clone(),
+                ))
+            }
+        }
+    }
+
+    fn fill_public_storage_values_with_prompt(
+        &self,
+        shell: &Shell,
+    ) -> anyhow::Result<Option<ProofStorageConfig>> {
+        logger::info(MSG_GETTING_PUBLIC_STORE_CONFIG);
+        let shall_save_to_public_bucket = self
+            .shall_save_to_public_bucket
+            .unwrap_or_else(|| PromptConfirm::new(MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT).ask());
+
+        if !shall_save_to_public_bucket {
+            return Ok(None);
+        }
+
+        if self.public_store_dir.is_some() {
+            return Ok(Some(
+                self.handle_file_backed_config(self.public_store_dir.clone()),
+            ));
+        }
+
+        if self.partial_gcs_config_provided(
+            self.public_store_gcs_config.public_bucket_base_url.clone(),
+            self.public_store_gcs_config.public_credentials_file.clone(),
+        ) {
+            return Ok(Some(self.ask_gcs_config(
+                self.public_store_gcs_config.public_bucket_base_url.clone(),
+                self.public_store_gcs_config.public_credentials_file.clone(),
+            )));
+        }
+
+        if self.partial_create_gcs_bucket_config_provided(
+            self.public_create_gcs_bucket_config
+                .public_bucket_name
+                .clone(),
+            self.public_create_gcs_bucket_config.public_location.clone(),
+            self.public_create_gcs_bucket_config
+                .public_project_id
+                .clone(),
+        ) {
+            let project_ids = get_project_ids(shell)?;
+            return Ok(Some(
+                self.handle_create_gcs_bucket(
+                    project_ids,
+                    self.public_create_gcs_bucket_config
+                        .public_project_id
+                        .clone(),
+                    self.public_create_gcs_bucket_config
+                        .public_bucket_name
+                        .clone(),
+                    self.public_create_gcs_bucket_config.public_location.clone(),
+                    self.public_store_gcs_config.public_credentials_file.clone(),
+                ),
+            ));
+        }
+
+        match PromptSelect::new(MSG_PROOF_STORE_CONFIG_PROMPT, ProofStoreConfig::iter()).ask() {
+            ProofStoreConfig::Local => Ok(Some(
+                self.handle_file_backed_config(self.public_store_dir.clone()),
+            )),
+            ProofStoreConfig::GCS => {
+                let project_ids = get_project_ids(shell)?;
+                Ok(Some(self.handle_gcs_config(
+                    project_ids,
+                    self.public_store_gcs_config.public_bucket_base_url.clone(),
+                    self.public_store_gcs_config.public_credentials_file.clone(),
+                )))
+            }
+        }
+    }
+
+    fn fill_setup_key_values_with_prompt(&self, setup_key_path: &str) -> SetupKeyConfig {
+        let download_key = self
+            .clone()
+            .setup_key_config
+            .download_key
+            .unwrap_or_else(|| PromptConfirm::new(MSG_DOWNLOAD_SETUP_KEY_PROMPT).ask());
+        let setup_key_path = self
+            .clone()
+            .setup_key_config
+            .setup_key_path
+            .unwrap_or_else(|| {
+                Prompt::new(MSG_SETUP_KEY_PATH_PROMPT)
+                    .default(setup_key_path)
+                    .ask()
+            });
+
+        SetupKeyConfig {
+            download_key,
+            setup_key_path,
+        }
+    }
+
+    fn partial_create_gcs_bucket_config_provided(
+        &self,
+        bucket_name: Option<String>,
+        location: Option<String>,
+        project_id: Option<String>,
+    ) -> bool {
+        bucket_name.is_some() || location.is_some() || project_id.is_some()
+    }
+
+    fn partial_gcs_config_provided(
+        &self,
+        bucket_base_url: Option<String>,
+        credentials_file: Option<String>,
+    ) -> bool {
+        bucket_base_url.is_some() || credentials_file.is_some()
+    }
+
+    fn handle_file_backed_config(&self, store_dir: Option<String>) -> ProofStorageConfig {
+        let proof_store_dir = store_dir.unwrap_or_else(|| {
+            Prompt::new(MSG_PROOF_STORE_DIR_PROMPT)
+                .default(DEFAULT_PROOF_STORE_DIR)
+                .ask()
+        });
+
+        ProofStorageConfig::FileBacked(ProofStorageFileBacked { proof_store_dir })
+    }
+
+    fn handle_gcs_config(
+        &self,
+        project_ids: Vec<String>,
+        bucket_base_url: Option<String>,
+        credentials_file: Option<String>,
+    ) -> ProofStorageConfig {
+        if !self.partial_gcs_config_provided(bucket_base_url.clone(), credentials_file.clone())
+            && PromptConfirm::new(MSG_CREATE_GCS_BUCKET_PROMPT).ask()
+        {
+            return self.handle_create_gcs_bucket(project_ids, None, None, None, None);
+        }
+
+        self.ask_gcs_config(bucket_base_url, credentials_file)
+    }
+
+    fn handle_create_gcs_bucket(
+        &self,
+        project_ids: Vec<String>,
+        project_id: Option<String>,
+        bucket_name: Option<String>,
+        location: Option<String>,
+        credentials_file: Option<String>,
+    ) -> ProofStorageConfig {
+        let project_id = project_id.unwrap_or_else(|| {
+            if project_ids.is_empty() {
+                Prompt::new(MSG_CREATE_GCS_BUCKET_PROJECT_ID_NO_PROJECTS_PROMPT).ask()
+            } else {
+                PromptSelect::new(MSG_CREATE_GCS_BUCKET_PROJECT_ID_PROMPT, project_ids).ask()
+            }
+        });
+        let bucket_name =
+            bucket_name.unwrap_or_else(|| Prompt::new(MSG_CREATE_GCS_BUCKET_NAME_PROMTP).ask());
+        let location =
+            location.unwrap_or_else(|| Prompt::new(MSG_CREATE_GCS_BUCKET_LOCATION_PROMPT).ask());
+        let credentials_file = credentials_file.unwrap_or_else(|| {
+            Prompt::new(MSG_PROOF_STORE_GCS_CREDENTIALS_FILE_PROMPT)
+                .default(DEFAULT_CREDENTIALS_FILE)
+                .ask()
+        });
+
+        ProofStorageConfig::GCSCreateBucket(ProofStorageGCSCreateBucket {
+            bucket_name,
+            location,
+            project_id,
+            credentials_file,
+        })
+    }
+
+    fn ask_gcs_config(
+        &self,
+        bucket_base_url: Option<String>,
+        credentials_file: Option<String>,
+    ) -> ProofStorageConfig {
+        let mut bucket_base_url = bucket_base_url
+            .unwrap_or_else(|| Prompt::new(MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_PROMPT).ask());
+        while !bucket_base_url.starts_with("gs://") {
+            logger::error(MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_ERR);
+            bucket_base_url = Prompt::new(MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_PROMPT).ask();
+        }
+        let credentials_file = credentials_file.unwrap_or_else(|| {
+            Prompt::new(MSG_PROOF_STORE_GCS_CREDENTIALS_FILE_PROMPT)
+                .default(DEFAULT_CREDENTIALS_FILE)
+                .ask()
+        });
+
+        ProofStorageConfig::GCS(ProofStorageGCS {
+            bucket_base_url,
+            credentials_file,
+        })
+    }
+
+    fn fill_bellman_cuda_values_with_prompt(&self) -> anyhow::Result<InitBellmanCudaArgs> {
+        self.bellman_cuda_config.clone().fill_values_with_prompt()
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs
new file mode 100644
index 000000000000..848457c53271
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs
@@ -0,0 +1,50 @@
+use clap::Parser;
+use common::{Prompt, PromptSelect};
+use serde::{Deserialize, Serialize};
+use strum::{EnumIter, IntoEnumIterator};
+
+use crate::messages::{
+    MSG_BELLMAN_CUDA_DIR_PROMPT, MSG_BELLMAN_CUDA_ORIGIN_SELECT, MSG_BELLMAN_CUDA_SELECTION_CLONE,
+    MSG_BELLMAN_CUDA_SELECTION_PATH,
+};
+
+#[derive(Debug, Clone, Parser, Default, Serialize, Deserialize)]
+pub struct InitBellmanCudaArgs {
+    #[clap(long)]
+    pub bellman_cuda_dir: Option<String>,
+}
+
+#[derive(Debug, Clone, EnumIter, PartialEq, Eq)]
+enum BellmanCudaPathSelection {
+    Clone,
+    Path,
+}
+
+impl std::fmt::Display for BellmanCudaPathSelection {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            BellmanCudaPathSelection::Clone => write!(f, "{MSG_BELLMAN_CUDA_SELECTION_CLONE}"),
+            BellmanCudaPathSelection::Path => write!(f, "{MSG_BELLMAN_CUDA_SELECTION_PATH}"),
+        }
+    }
+}
+
+impl InitBellmanCudaArgs {
+    pub fn fill_values_with_prompt(self) -> anyhow::Result<InitBellmanCudaArgs> {
+        let bellman_cuda_dir = self.bellman_cuda_dir.unwrap_or_else(|| {
+            match PromptSelect::new(
+                MSG_BELLMAN_CUDA_ORIGIN_SELECT,
+                BellmanCudaPathSelection::iter(),
+            )
+            .ask()
+            {
+                BellmanCudaPathSelection::Clone => "".to_string(),
+                BellmanCudaPathSelection::Path => Prompt::new(MSG_BELLMAN_CUDA_DIR_PROMPT).ask(),
+            }
+        });
+
+        Ok(InitBellmanCudaArgs {
+            bellman_cuda_dir: Some(bellman_cuda_dir),
+        })
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs
new file mode 100644
index 000000000000..66d97d75094c
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs
@@ -0,0 +1,3 @@
+pub mod init;
+pub mod init_bellman_cuda;
+pub mod run;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs
new file mode 100644
index 000000000000..678c548cea64
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs
@@ -0,0 +1,87 @@
+use clap::{Parser, ValueEnum};
+use common::PromptSelect;
+use strum::{EnumIter, IntoEnumIterator};
+
+use crate::messages::{MSG_ROUND_SELECT_PROMPT, MSG_RUN_COMPONENT_PROMPT};
+
+#[derive(Debug, Clone, Parser, Default)]
+pub struct ProverRunArgs {
+    #[clap(long)]
+    pub component: Option<ProverComponent>,
+    #[clap(flatten)]
+    pub witness_generator_args: WitnessGeneratorArgs,
+}
+
+#[derive(
+    Debug, Clone, ValueEnum, strum::EnumString, EnumIter, PartialEq, Eq, Copy, strum_macros::Display,
+)]
+pub enum ProverComponent {
+    #[strum(to_string = "Gateway")]
+    Gateway,
+    #[strum(to_string = "Witness generator")]
+    WitnessGenerator,
+    #[strum(to_string = "Witness vector generator")]
+    WitnessVectorGenerator,
+    #[strum(to_string = "Prover")]
+    Prover,
+    #[strum(to_string = "Compressor")]
+    Compressor,
+}
+
+#[derive(Debug, Clone, Parser, Default)]
+pub struct WitnessGeneratorArgs {
+    #[clap(long)]
+    pub round: Option<WitnessGeneratorRound>,
+}
+
+#[derive(
+    Debug, Clone, ValueEnum, strum::EnumString, EnumIter, PartialEq, Eq, strum_macros::Display,
+)]
+pub enum WitnessGeneratorRound {
+    #[strum(to_string = "All rounds")]
+    AllRounds,
+    #[strum(to_string = "Basic circuits")]
+    BasicCircuits,
+    #[strum(to_string = "Leaf aggregation")]
+    LeafAggregation,
+    #[strum(to_string = "Node aggregation")]
+    NodeAggregation,
+    #[strum(to_string = "Recursion tip")]
+    RecursionTip,
+    #[strum(to_string = "Scheduler")]
+    Scheduler,
+}
+
+impl ProverRunArgs {
+    pub fn fill_values_with_prompt(&self) -> anyhow::Result<ProverRunArgs> {
+        let component = self.component.unwrap_or_else(|| {
+            PromptSelect::new(MSG_RUN_COMPONENT_PROMPT, ProverComponent::iter()).ask()
+        });
+
+        let witness_generator_args = self
+            .witness_generator_args
+            .fill_values_with_prompt(component)?;
+
+        Ok(ProverRunArgs {
+            component: Some(component),
+            witness_generator_args,
+        })
+    }
+}
+
+impl WitnessGeneratorArgs {
+    pub fn fill_values_with_prompt(
+        &self,
+        component: ProverComponent,
+    ) -> anyhow::Result<WitnessGeneratorArgs> {
+        if component != ProverComponent::WitnessGenerator {
+            return Ok(Self::default());
+        }
+
+        let round = self.round.clone().unwrap_or_else(|| {
+            PromptSelect::new(MSG_ROUND_SELECT_PROMPT, WitnessGeneratorRound::iter()).ask()
+        });
+
+        Ok(WitnessGeneratorArgs { round: Some(round) })
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs
new file mode 100644
index 000000000000..0c76cb10f542
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs
@@ -0,0 +1,54 @@
+use common::{cmd::Cmd, logger, spinner::Spinner};
+use xshell::{cmd, Shell};
+use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig};
+
+use super::args::init::ProofStorageGCSCreateBucket;
+use crate::{
+    consts::PROVER_STORE_MAX_RETRIES,
+    messages::{
+        msg_bucket_created, MSG_CREATING_GCS_BUCKET_SPINNER, MSG_GETTING_GCP_PROJECTS_SPINNER,
+    },
+};
+
+pub(crate) fn create_gcs_bucket(
+    shell: &Shell,
+    config: ProofStorageGCSCreateBucket,
+) -> anyhow::Result<ObjectStoreConfig> {
+    let bucket_name = config.bucket_name;
+    let location = config.location;
+    let project_id = config.project_id;
+    let cmd = Cmd::new(cmd!(
+        shell,
+        "gcloud storage buckets create gs://{bucket_name} --location={location} --project={project_id}"
+    ));
+    let spinner = Spinner::new(MSG_CREATING_GCS_BUCKET_SPINNER);
+    cmd.run()?;
+    spinner.finish();
+
+    logger::info(msg_bucket_created(&bucket_name));
+
+    Ok(ObjectStoreConfig {
+        mode: ObjectStoreMode::GCSWithCredentialFile {
+            bucket_base_url: format!("gs://{}", bucket_name),
+            gcs_credential_file_path: config.credentials_file,
+        },
+        max_retries: PROVER_STORE_MAX_RETRIES,
+        local_mirror_path: None,
+    })
+}
+
+pub(crate) fn get_project_ids(shell: &Shell) -> anyhow::Result<Vec<String>> {
+    let spinner = Spinner::new(MSG_GETTING_GCP_PROJECTS_SPINNER);
+
+    let mut cmd = Cmd::new(cmd!(
+        shell,
+        "gcloud projects list --format='value(projectId)'"
+    ));
+    let output = cmd.run_with_output()?;
+    let project_ids: Vec<String> = String::from_utf8(output.stdout)?
+        .lines()
+        .map(|line| line.to_string())
+        .collect();
+    spinner.finish();
+    Ok(project_ids)
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs
index a14dd6fb87e5..1657ab2c99fb 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs
@@ -1,5 +1,5 @@
 use anyhow::Ok;
-use common::{cmd::Cmd, logger, spinner::Spinner};
+use common::{check_prover_prequisites, cmd::Cmd, logger, spinner::Spinner};
 use config::EcosystemConfig;
 use xshell::{cmd, Shell};
 
@@ -7,12 +7,14 @@ use super::utils::get_link_to_prover;
 use crate::messages::{MSG_GENERATING_SK_SPINNER, MSG_SK_GENERATED};
 
 pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> {
+    check_prover_prequisites(shell);
+
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     let link_to_prover = get_link_to_prover(&ecosystem_config);
     shell.change_dir(&link_to_prover);
 
     let spinner = Spinner::new(MSG_GENERATING_SK_SPINNER);
-    let mut cmd = Cmd::new(cmd!(
+    let cmd = Cmd::new(cmd!(
         shell,
         "cargo run --features gpu --release --bin key_generator --
         generate-sk all --recompute-if-missing
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs
new file mode 100644
index 000000000000..31785338bf3e
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs
@@ -0,0 +1,139 @@
+use anyhow::Context;
+use common::{check_prover_prequisites, cmd::Cmd, logger, spinner::Spinner};
+use config::EcosystemConfig;
+use xshell::{cmd, Shell};
+use zksync_config::{
+    configs::{object_store::ObjectStoreMode, GeneralConfig},
+    ObjectStoreConfig,
+};
+
+use super::{
+    args::init::{ProofStorageConfig, ProverInitArgs},
+    gcs::create_gcs_bucket,
+    init_bellman_cuda::run as init_bellman_cuda,
+    utils::get_link_to_prover,
+};
+use crate::{
+    consts::PROVER_STORE_MAX_RETRIES,
+    messages::{
+        MSG_CHAIN_NOT_FOUND_ERR, MSG_DOWNLOADING_SETUP_KEY_SPINNER,
+        MSG_GENERAL_CONFIG_NOT_FOUND_ERR, MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR,
+        MSG_PROVER_CONFIG_NOT_FOUND_ERR, MSG_PROVER_INITIALIZED, MSG_SETUP_KEY_PATH_ERROR,
+    },
+};
+
+pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<()> {
+    check_prover_prequisites(shell);
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain_config = ecosystem_config
+        .load_chain(Some(ecosystem_config.default_chain.clone()))
+        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
+    let mut general_config = chain_config
+        .get_zksync_general_config()
+        .context(MSG_GENERAL_CONFIG_NOT_FOUND_ERR)?;
+
+    let setup_key_path = get_default_setup_key_path(&ecosystem_config)?;
+
+    let args = args.fill_values_with_prompt(shell, &setup_key_path)?;
+
+    let proof_object_store_config = get_object_store_config(shell, Some(args.proof_store))?;
+    let public_object_store_config = get_object_store_config(shell, args.public_store)?;
+
+    if args.setup_key_config.download_key {
+        download_setup_key(
+            shell,
+            &general_config,
+            &args.setup_key_config.setup_key_path,
+        )?;
+    }
+
+    let mut prover_config = general_config
+        .prover_config
+        .expect(MSG_PROVER_CONFIG_NOT_FOUND_ERR);
+    prover_config
+        .prover_object_store
+        .clone_from(&proof_object_store_config);
+    if let Some(public_object_store_config) = public_object_store_config {
+        prover_config.shall_save_to_public_bucket = true;
+        prover_config.public_object_store = Some(public_object_store_config);
+    } else {
+        prover_config.shall_save_to_public_bucket = false;
+    }
+    general_config.prover_config = Some(prover_config);
+
+    let mut proof_compressor_config = general_config
+        .proof_compressor_config
+        .expect(MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR);
+    proof_compressor_config.universal_setup_path = args.setup_key_config.setup_key_path;
+    general_config.proof_compressor_config = Some(proof_compressor_config);
+
+    chain_config.save_zksync_general_config(&general_config)?;
+
+    init_bellman_cuda(shell, args.bellman_cuda_config).await?;
+
+    logger::outro(MSG_PROVER_INITIALIZED);
+    Ok(())
+}
+
+fn download_setup_key(
+    shell: &Shell,
+    general_config: &GeneralConfig,
+    path: &str,
+) -> anyhow::Result<()> {
+    let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_KEY_SPINNER);
+    let compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config
+        .proof_compressor_config
+        .as_ref()
+        .expect(MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR)
+        .clone();
+    let url = compressor_config.universal_setup_download_url;
+    let path = std::path::Path::new(path);
+    let parent = path.parent().expect(MSG_SETUP_KEY_PATH_ERROR);
+    let file_name = path.file_name().expect(MSG_SETUP_KEY_PATH_ERROR);
+
+    Cmd::new(cmd!(shell, "wget {url} -P {parent}")).run()?;
+
+    if file_name != "setup_2^24.key" {
+        Cmd::new(cmd!(shell, "mv {parent}/setup_2^24.key {path}")).run()?;
+    }
+
+    spinner.finish();
+    Ok(())
+}
+
+fn get_default_setup_key_path(ecosystem_config: &EcosystemConfig) -> anyhow::Result<String> {
+    let link_to_prover = get_link_to_prover(ecosystem_config);
+    let path = link_to_prover.join("keys/setup/setup_2^24.key");
+    let string = path.to_str().unwrap();
+
+    Ok(String::from(string))
+}
+
+fn get_object_store_config(
+    shell: &Shell,
+    config: Option<ProofStorageConfig>,
+) -> anyhow::Result<Option<ObjectStoreConfig>> {
+    let object_store = match config {
+        Some(ProofStorageConfig::FileBacked(config)) => Some(ObjectStoreConfig {
+            mode: ObjectStoreMode::FileBacked {
+                file_backed_base_path: config.proof_store_dir,
+            },
+            max_retries: PROVER_STORE_MAX_RETRIES,
+            local_mirror_path: None,
+        }),
+        Some(ProofStorageConfig::GCS(config)) => Some(ObjectStoreConfig {
+            mode: ObjectStoreMode::GCSWithCredentialFile {
+                bucket_base_url: config.bucket_base_url,
+                gcs_credential_file_path: config.credentials_file,
+            },
+            max_retries: PROVER_STORE_MAX_RETRIES,
+            local_mirror_path: None,
+        }),
+        Some(ProofStorageConfig::GCSCreateBucket(config)) => {
+            Some(create_gcs_bucket(shell, config)?)
+        }
+        None => None,
+    };
+
+    Ok(object_store)
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs
new file mode 100644
index 000000000000..c6c5d3ef23d9
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs
@@ -0,0 +1,62 @@
+use anyhow::Context;
+use common::{check_prover_prequisites, cmd::Cmd, git, logger, spinner::Spinner};
+use config::{traits::SaveConfigWithBasePath, EcosystemConfig};
+use xshell::{cmd, Shell};
+
+use super::args::init_bellman_cuda::InitBellmanCudaArgs;
+use crate::{
+    consts::BELLMAN_CUDA_DIR,
+    messages::{
+        MSG_BELLMAN_CUDA_DIR_ERR, MSG_BELLMAN_CUDA_INITIALIZED, MSG_BUILDING_BELLMAN_CUDA_SPINNER,
+        MSG_CLONING_BELLMAN_CUDA_SPINNER,
+    },
+};
+
+pub(crate) async fn run(shell: &Shell, args: InitBellmanCudaArgs) -> anyhow::Result<()> {
+    check_prover_prequisites(shell);
+
+    let mut ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    let args = args.fill_values_with_prompt()?;
+
+    let bellman_cuda_dir = args.bellman_cuda_dir.unwrap_or("".to_string());
+    let bellman_cuda_dir = if bellman_cuda_dir.is_empty() {
+        clone_bellman_cuda(shell)?
+    } else {
+        bellman_cuda_dir
+    };
+
+    ecosystem_config.bellman_cuda_dir = Some(bellman_cuda_dir.clone().into());
+
+    build_bellman_cuda(shell, &bellman_cuda_dir)?;
+
+    ecosystem_config.save_with_base_path(shell, ".")?;
+
+    logger::outro(MSG_BELLMAN_CUDA_INITIALIZED);
+    Ok(())
+}
+
+fn clone_bellman_cuda(shell: &Shell) -> anyhow::Result<String> {
+    let spinner = Spinner::new(MSG_CLONING_BELLMAN_CUDA_SPINNER);
+    let path = git::clone(
+        shell,
+        shell.current_dir(),
+        "https://github.com/matter-labs/era-bellman-cuda",
+        BELLMAN_CUDA_DIR,
+    )?;
+    spinner.finish();
+
+    Ok(path.to_str().context(MSG_BELLMAN_CUDA_DIR_ERR)?.to_string())
+}
+
+fn build_bellman_cuda(shell: &Shell, bellman_cuda_dir: &str) -> anyhow::Result<()> {
+    let spinner = Spinner::new(MSG_BUILDING_BELLMAN_CUDA_SPINNER);
+    Cmd::new(cmd!(
+        shell,
+        "cmake -B{bellman_cuda_dir}/build -S{bellman_cuda_dir}/ -DCMAKE_BUILD_TYPE=Release"
+    ))
+    .run()?;
+    Cmd::new(cmd!(shell, "cmake --build {bellman_cuda_dir}/build")).run()?;
+    spinner.finish();
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs
index c617b915a52c..31c3a02e3806 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs
@@ -1,16 +1,34 @@
+use args::{init::ProverInitArgs, init_bellman_cuda::InitBellmanCudaArgs, run::ProverRunArgs};
 use clap::Subcommand;
 use xshell::Shell;
+
+mod args;
+mod gcs;
 mod generate_sk;
+mod init;
+mod init_bellman_cuda;
+mod run;
 mod utils;
 
 #[derive(Subcommand, Debug)]
 pub enum ProverCommands {
     /// Initialize prover
+    Init(Box<ProverInitArgs>),
+    /// Generate setup keys
+    #[command(alias = "sk")]
     GenerateSK,
+    /// Run prover
+    Run(ProverRunArgs),
+    /// Initialize bellman-cuda
+    #[command(alias = "cuda")]
+    InitBellmanCuda(Box<InitBellmanCudaArgs>),
 }
 
 pub(crate) async fn run(shell: &Shell, args: ProverCommands) -> anyhow::Result<()> {
     match args {
+        ProverCommands::Init(args) => init::run(*args, shell).await,
         ProverCommands::GenerateSK => generate_sk::run(shell).await,
+        ProverCommands::Run(args) => run::run(args, shell).await,
+        ProverCommands::InitBellmanCuda(args) => init_bellman_cuda::run(shell, *args).await,
     }
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs
new file mode 100644
index 000000000000..898cf0e45d66
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs
@@ -0,0 +1,121 @@
+use anyhow::Context;
+use common::{check_prover_prequisites, cmd::Cmd, logger};
+use config::{ChainConfig, EcosystemConfig};
+use xshell::{cmd, Shell};
+
+use super::{
+    args::run::{ProverComponent, ProverRunArgs, WitnessGeneratorArgs, WitnessGeneratorRound},
+    utils::get_link_to_prover,
+};
+use crate::messages::{
+    MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR,
+    MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR,
+    MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_WITNESS_GENERATOR,
+    MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR,
+    MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, MSG_WITNESS_GENERATOR_ROUND_ERR,
+};
+
+pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<()> {
+    check_prover_prequisites(shell);
+    let args = args.fill_values_with_prompt()?;
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain = ecosystem_config
+        .load_chain(Some(ecosystem_config.default_chain.clone()))
+        .expect(MSG_CHAIN_NOT_FOUND_ERR);
+
+    let link_to_prover = get_link_to_prover(&ecosystem_config);
+    shell.change_dir(link_to_prover.clone());
+
+    match args.component {
+        Some(ProverComponent::Gateway) => run_gateway(shell, &chain)?,
+        Some(ProverComponent::WitnessGenerator) => {
+            run_witness_generator(shell, &chain, args.witness_generator_args)?
+        }
+        Some(ProverComponent::WitnessVectorGenerator) => {
+            run_witness_vector_generator(shell, &chain)?
+        }
+        Some(ProverComponent::Prover) => run_prover(shell, &chain)?,
+        Some(ProverComponent::Compressor) => run_compressor(shell, &chain, &ecosystem_config)?,
+        None => anyhow::bail!(MSG_MISSING_COMPONENT_ERR),
+    }
+
+    Ok(())
+}
+
+fn run_gateway(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> {
+    logger::info(MSG_RUNNING_PROVER_GATEWAY);
+    let config_path = chain.path_to_general_config();
+    let secrets_path = chain.path_to_secrets_config();
+
+    let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_prover_fri_gateway -- --config-path={config_path} --secrets-path={secrets_path}"));
+    cmd = cmd.with_force_run();
+    cmd.run().context(MSG_RUNNING_PROVER_GATEWAY_ERR)
+}
+
+fn run_witness_generator(
+    shell: &Shell,
+    chain: &ChainConfig,
+    args: WitnessGeneratorArgs,
+) -> anyhow::Result<()> {
+    logger::info(MSG_RUNNING_WITNESS_GENERATOR);
+    let config_path = chain.path_to_general_config();
+    let secrets_path = chain.path_to_secrets_config();
+    let round = args.round.expect(MSG_WITNESS_GENERATOR_ROUND_ERR);
+
+    let round_str = match round {
+        WitnessGeneratorRound::AllRounds => "--all_rounds",
+        WitnessGeneratorRound::BasicCircuits => "--round=basic_circuits",
+        WitnessGeneratorRound::LeafAggregation => "--round=leaf_aggregation",
+        WitnessGeneratorRound::NodeAggregation => "--round=node_aggregation",
+        WitnessGeneratorRound::RecursionTip => "--round=recursion_tip",
+        WitnessGeneratorRound::Scheduler => "--round=scheduler",
+    };
+
+    let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_witness_generator -- {round_str} --config-path={config_path} --secrets-path={secrets_path}"));
+    cmd = cmd.with_force_run();
+    cmd.run().context(MSG_RUNNING_WITNESS_GENERATOR_ERR)
+}
+
+fn run_witness_vector_generator(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> {
+    logger::info(MSG_RUNNING_WITNESS_VECTOR_GENERATOR);
+    let config_path = chain.path_to_general_config();
+    let secrets_path = chain.path_to_secrets_config();
+
+    let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_witness_vector_generator -- --config-path={config_path} --secrets-path={secrets_path}"));
+    cmd = cmd.with_force_run();
+    cmd.run().context(MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR)
+}
+
+fn run_prover(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> {
+    logger::info(MSG_RUNNING_PROVER);
+    let config_path = chain.path_to_general_config();
+    let secrets_path = chain.path_to_secrets_config();
+
+    let mut cmd = Cmd::new(
+        cmd!(shell, "cargo run --features gpu --release --bin zksync_prover_fri -- --config-path={config_path} --secrets-path={secrets_path}"),
+    );
+    cmd = cmd.with_force_run();
+    cmd.run().context(MSG_RUNNING_PROVER_ERR)
+}
+
+fn run_compressor(
+    shell: &Shell,
+    chain: &ChainConfig,
+    ecosystem: &EcosystemConfig,
+) -> anyhow::Result<()> {
+    logger::info(MSG_RUNNING_COMPRESSOR);
+    let config_path = chain.path_to_general_config();
+    let secrets_path = chain.path_to_secrets_config();
+
+    shell.set_var(
+        "BELLMAN_CUDA_DIR",
+        ecosystem
+            .bellman_cuda_dir
+            .clone()
+            .expect(MSG_BELLMAN_CUDA_DIR_ERR),
+    );
+
+    let mut cmd = Cmd::new(cmd!(shell, "cargo run --features gpu --release --bin zksync_proof_fri_compressor -- --config-path={config_path} --secrets-path={secrets_path}"));
+    cmd = cmd.with_force_run();
+    cmd.run().context(MSG_RUNNING_COMPRESSOR_ERR)
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/server.rs b/zk_toolbox/crates/zk_inception/src/commands/server.rs
index e2d35dd9b792..b5a09ed04370 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/server.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/server.rs
@@ -1,12 +1,18 @@
 use anyhow::Context;
-use common::{cmd::Cmd, config::global_config, logger, spinner::Spinner};
-use config::{ChainConfig, EcosystemConfig};
-use xshell::{cmd, Shell};
+use common::{
+    config::global_config,
+    logger,
+    server::{Server, ServerMode},
+};
+use config::{
+    traits::FileConfigWithDefaultName, ChainConfig, ContractsConfig, EcosystemConfig,
+    GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig,
+};
+use xshell::Shell;
 
 use crate::{
     commands::args::RunServerArgs,
-    messages::{MSG_BUILDING_L1_CONTRACTS, MSG_CHAIN_NOT_INITIALIZED, MSG_STARTING_SERVER},
-    server::{RunServer, ServerMode},
+    messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_RUN_SERVER_ERR, MSG_STARTING_SERVER},
 };
 
 pub fn run(shell: &Shell, args: RunServerArgs) -> anyhow::Result<()> {
@@ -19,30 +25,38 @@ pub fn run(shell: &Shell, args: RunServerArgs) -> anyhow::Result<()> {
 
     logger::info(MSG_STARTING_SERVER);
 
-    build_l1_contracts(shell, &ecosystem_config)?;
     run_server(args, &chain_config, shell)?;
 
     Ok(())
 }
 
-fn build_l1_contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> {
-    let _dir_guard = shell.push_dir(ecosystem_config.path_to_foundry());
-    let spinner = Spinner::new(MSG_BUILDING_L1_CONTRACTS);
-    Cmd::new(cmd!(shell, "yarn build")).run()?;
-    spinner.finish();
-    Ok(())
-}
-
 fn run_server(
     args: RunServerArgs,
     chain_config: &ChainConfig,
     shell: &Shell,
 ) -> anyhow::Result<()> {
-    let server = RunServer::new(args.components.clone(), chain_config);
+    let server = Server::new(args.components.clone(), chain_config.link_to_code.clone());
+
+    if args.build {
+        server.build(shell)?;
+        return Ok(());
+    }
+
     let mode = if args.genesis {
         ServerMode::Genesis
     } else {
         ServerMode::Normal
     };
-    server.run(shell, mode)
+    server
+        .run(
+            shell,
+            mode,
+            GenesisConfig::get_path_with_base_path(&chain_config.configs),
+            WalletsConfig::get_path_with_base_path(&chain_config.configs),
+            GeneralConfig::get_path_with_base_path(&chain_config.configs),
+            SecretsConfig::get_path_with_base_path(&chain_config.configs),
+            ContractsConfig::get_path_with_base_path(&chain_config.configs),
+            vec![],
+        )
+        .context(MSG_FAILED_TO_RUN_SERVER_ERR)
 }
diff --git a/zk_toolbox/crates/zk_inception/src/config_manipulations.rs b/zk_toolbox/crates/zk_inception/src/config_manipulations.rs
index a300a15e76c6..e69de29bb2d1 100644
--- a/zk_toolbox/crates/zk_inception/src/config_manipulations.rs
+++ b/zk_toolbox/crates/zk_inception/src/config_manipulations.rs
@@ -1,97 +0,0 @@
-use common::db::DatabaseConfig;
-use config::{
-    forge_interface::{
-        initialize_bridges::output::InitializeBridgeOutput, paymaster::DeployPaymasterOutput,
-        register_chain::output::RegisterChainOutput,
-    },
-    traits::{ReadConfigWithBasePath, SaveConfigWithBasePath},
-    ChainConfig, ContractsConfig, GeneralConfig, GenesisConfig, SecretsConfig,
-};
-use types::ProverMode;
-use xshell::Shell;
-
-use crate::defaults::{ROCKS_DB_STATE_KEEPER, ROCKS_DB_TREE};
-
-pub(crate) fn update_genesis(shell: &Shell, config: &ChainConfig) -> anyhow::Result<()> {
-    let mut genesis = GenesisConfig::read_with_base_path(shell, &config.configs)?;
-
-    genesis.l2_chain_id = config.chain_id;
-    genesis.l1_chain_id = config.l1_network.chain_id();
-    genesis.l1_batch_commit_data_generator_mode = Some(config.l1_batch_commit_data_generator_mode);
-
-    genesis.save_with_base_path(shell, &config.configs)?;
-    Ok(())
-}
-
-pub(crate) fn update_database_secrets(
-    shell: &Shell,
-    config: &ChainConfig,
-    server_db_config: &DatabaseConfig,
-    prover_db_config: &DatabaseConfig,
-) -> anyhow::Result<()> {
-    let mut secrets = SecretsConfig::read_with_base_path(shell, &config.configs)?;
-    secrets.database.server_url = server_db_config.full_url();
-    secrets.database.prover_url = prover_db_config.full_url();
-    secrets.save_with_base_path(shell, &config.configs)?;
-    Ok(())
-}
-
-pub(crate) fn update_l1_rpc_url_secret(
-    shell: &Shell,
-    config: &ChainConfig,
-    l1_rpc_url: String,
-) -> anyhow::Result<()> {
-    let mut secrets = SecretsConfig::read_with_base_path(shell, &config.configs)?;
-    secrets.l1.l1_rpc_url = l1_rpc_url;
-    secrets.save_with_base_path(shell, &config.configs)?;
-    Ok(())
-}
-
-pub(crate) fn update_general_config(shell: &Shell, config: &ChainConfig) -> anyhow::Result<()> {
-    let mut general = GeneralConfig::read_with_base_path(shell, &config.configs)?;
-    general.db.state_keeper_db_path =
-        shell.create_dir(config.rocks_db_path.join(ROCKS_DB_STATE_KEEPER))?;
-    general.db.merkle_tree.path = shell.create_dir(config.rocks_db_path.join(ROCKS_DB_TREE))?;
-    if config.prover_version != ProverMode::NoProofs {
-        general.eth.sender.proof_sending_mode = "ONLY_REAL_PROOFS".to_string();
-    }
-    general.save_with_base_path(shell, &config.configs)?;
-    Ok(())
-}
-
-pub fn update_l1_contracts(
-    shell: &Shell,
-    config: &ChainConfig,
-    register_chain_output: &RegisterChainOutput,
-) -> anyhow::Result<ContractsConfig> {
-    let mut contracts_config = ContractsConfig::read_with_base_path(shell, &config.configs)?;
-    contracts_config.l1.diamond_proxy_addr = register_chain_output.diamond_proxy_addr;
-    contracts_config.l1.governance_addr = register_chain_output.governance_addr;
-    contracts_config.save_with_base_path(shell, &config.configs)?;
-    Ok(contracts_config)
-}
-
-pub fn update_l2_shared_bridge(
-    shell: &Shell,
- config: &ChainConfig, - initialize_bridges_output: &InitializeBridgeOutput, -) -> anyhow::Result<()> { - let mut contracts_config = ContractsConfig::read_with_base_path(shell, &config.configs)?; - contracts_config.bridges.shared.l2_address = - Some(initialize_bridges_output.l2_shared_bridge_proxy); - contracts_config.bridges.erc20.l2_address = - Some(initialize_bridges_output.l2_shared_bridge_proxy); - contracts_config.save_with_base_path(shell, &config.configs)?; - Ok(()) -} - -pub fn update_paymaster( - shell: &Shell, - config: &ChainConfig, - paymaster_output: &DeployPaymasterOutput, -) -> anyhow::Result<()> { - let mut contracts_config = ContractsConfig::read_with_base_path(shell, &config.configs)?; - contracts_config.l2.testnet_paymaster_addr = paymaster_output.paymaster; - contracts_config.save_with_base_path(shell, &config.configs)?; - Ok(()) -} diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs index a59024d09b40..d9b61d49185a 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zk_toolbox/crates/zk_inception/src/consts.rs @@ -1,3 +1,9 @@ pub const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; pub const MINIMUM_BALANCE_FOR_WALLET: u128 = 5000000000000000000; +pub const SERVER_MIGRATIONS: &str = "core/lib/dal/migrations"; +pub const PROVER_MIGRATIONS: &str = "prover/crates/lib/prover_dal/migrations"; +pub const PROVER_STORE_MAX_RETRIES: u16 = 10; +pub const DEFAULT_CREDENTIALS_FILE: &str = "~/.config/gcloud/application_default_credentials.json"; +pub const DEFAULT_PROOF_STORE_DIR: &str = "artifacts"; +pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zk_toolbox/crates/zk_inception/src/defaults.rs index 04b735e02275..40be1293614b 100644 --- a/zk_toolbox/crates/zk_inception/src/defaults.rs +++ b/zk_toolbox/crates/zk_inception/src/defaults.rs @@ -9,8 +9,10 @@ lazy_static! 
{ Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap(); } -pub const ROCKS_DB_STATE_KEEPER: &str = "main/state_keeper"; -pub const ROCKS_DB_TREE: &str = "main/tree"; +pub const ROCKS_DB_STATE_KEEPER: &str = "state_keeper"; +pub const ROCKS_DB_TREE: &str = "tree"; +pub const EN_ROCKS_DB_PREFIX: &str = "en"; +pub const MAIN_ROCKS_DB_PREFIX: &str = "main"; pub const L2_CHAIN_ID: u32 = 271; /// Path to base chain configuration inside zksync-era @@ -36,3 +38,11 @@ pub fn generate_db_names(config: &ChainConfig) -> DBNames { ), } } + +pub fn generate_external_node_db_name(config: &ChainConfig) -> String { + format!( + "external_node_{}_{}", + config.l1_network.to_string().to_ascii_lowercase(), + config.name + ) +} diff --git a/zk_toolbox/crates/zk_inception/src/external_node.rs b/zk_toolbox/crates/zk_inception/src/external_node.rs new file mode 100644 index 000000000000..0770fa8b14cd --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/external_node.rs @@ -0,0 +1,77 @@ +use std::path::PathBuf; + +use anyhow::Context; +use common::cmd::Cmd; +use config::{ + external_node::ENConfig, traits::FileConfigWithDefaultName, ChainConfig, GeneralConfig, + SecretsConfig, +}; +use xshell::{cmd, Shell}; + +use crate::messages::MSG_FAILED_TO_RUN_SERVER_ERR; + +pub struct RunExternalNode { + components: Option>, + code_path: PathBuf, + general_config: PathBuf, + secrets: PathBuf, + en_config: PathBuf, +} + +impl RunExternalNode { + pub fn new( + components: Option>, + chain_config: &ChainConfig, + ) -> anyhow::Result { + let en_path = chain_config + .external_node_config_path + .clone() + .context("External node is not initialized")?; + let general_config = GeneralConfig::get_path_with_base_path(&en_path); + let secrets = SecretsConfig::get_path_with_base_path(&en_path); + let enconfig = ENConfig::get_path_with_base_path(&en_path); + + Ok(Self { + components, + code_path: chain_config.link_to_code.clone(), + general_config, + secrets, + en_config: enconfig, + }) + } + + pub fn run(&self, shell: &Shell, mut additional_args: Vec) -> anyhow::Result<()> { + shell.change_dir(&self.code_path); + let config_general_config = &self.general_config.to_str().unwrap(); + let en_config = &self.en_config.to_str().unwrap(); + let secrets = &self.secrets.to_str().unwrap(); + if let Some(components) = self.components() { + additional_args.push(format!("--components={}", components)) + } + let cmd = Cmd::new( + cmd!( + shell, + "cargo run --release --bin zksync_external_node -- + --config-path {config_general_config} + --secrets-path {secrets} + --external-node-config-path {en_config} + " + ) + .args(additional_args) + .env_remove("RUSTUP_TOOLCHAIN"), + ) + .with_force_run(); + + cmd.run().context(MSG_FAILED_TO_RUN_SERVER_ERR)?; + Ok(()) + } + + fn components(&self) -> Option { + self.components.as_ref().and_then(|components| { + if components.is_empty() { + return None; + } + Some(components.join(",")) + }) + } +} diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs index dff9e479e01f..dd10e9494627 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zk_toolbox/crates/zk_inception/src/main.rs @@ -1,24 +1,26 @@ use clap::{command, Parser, Subcommand}; +use commands::contract_verifier::ContractVerifierCommands; use common::{ - check_prerequisites, + check_general_prerequisites, config::{global_config, init_global_config, GlobalConfig}, + error::log_error, init_prompt_theme, logger, }; use config::EcosystemConfig; use xshell::Shell; use 
crate::commands::{ - args::RunServerArgs, chain::ChainCommands, ecosystem::EcosystemCommands, prover::ProverCommands, + args::RunServerArgs, chain::ChainCommands, ecosystem::EcosystemCommands, + external_node::ExternalNodeCommands, prover::ProverCommands, }; pub mod accept_ownership; mod commands; -mod config_manipulations; mod consts; mod defaults; -pub mod forge_utils; +pub mod external_node; mod messages; -pub mod server; +mod utils; #[derive(Parser, Debug)] #[command(version, about)] @@ -32,18 +34,25 @@ struct Inception { #[derive(Subcommand, Debug)] pub enum InceptionSubcommands { /// Ecosystem related commands - #[command(subcommand)] + #[command(subcommand, alias = "e")] Ecosystem(EcosystemCommands), /// Chain related commands - #[command(subcommand)] + #[command(subcommand, alias = "c")] Chain(ChainCommands), /// Prover related commands - #[command(subcommand)] + #[command(subcommand, alias = "p")] Prover(ProverCommands), /// Run server Server(RunServerArgs), + // Run External Node + #[command(subcommand, alias = "en")] + ExternalNode(ExternalNodeCommands), /// Run containers for local development + #[command(alias = "up")] Containers, + /// Run contract verifier + #[command(subcommand)] + ContractVerifier(ContractVerifierCommands), } #[derive(Parser, Debug)] @@ -75,27 +84,13 @@ async fn main() -> anyhow::Result<()> { init_global_config_inner(&shell, &inception_args.global)?; if !global_config().ignore_prerequisites { - check_prerequisites(&shell); + check_general_prerequisites(&shell); } match run_subcommand(inception_args, &shell).await { Ok(_) => {} - Err(e) => { - logger::error(e.to_string()); - - if e.chain().count() > 1 { - logger::error_note( - "Caused by:", - &e.chain() - .skip(1) - .enumerate() - .map(|(i, cause)| format!(" {i}: {}", cause)) - .collect::>() - .join("\n"), - ); - } - - logger::outro("Failed"); + Err(error) => { + log_error(error); std::process::exit(1); } } @@ -109,6 +104,12 @@ async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Res InceptionSubcommands::Prover(args) => commands::prover::run(shell, args).await?, InceptionSubcommands::Server(args) => commands::server::run(shell, args)?, InceptionSubcommands::Containers => commands::containers::run(shell)?, + InceptionSubcommands::ExternalNode(args) => { + commands::external_node::run(shell, args).await? + } + InceptionSubcommands::ContractVerifier(args) => { + commands::contract_verifier::run(shell, args).await? 
+ } } Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 1b3c05258753..a33143b4bd66 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -1,3 +1,5 @@ +use std::path::Path; + use ethers::{ types::{H160, U256}, utils::format_ether, @@ -13,14 +15,18 @@ pub(super) const MSG_L1_NETWORK_HELP: &str = "L1 Network"; pub(super) const MSG_LINK_TO_CODE_HELP: &str = "Code link"; pub(super) const MSG_START_CONTAINERS_HELP: &str = "Start reth and postgres containers after creation"; -pub(super) const MSG_ECOSYSTEM_NAME_PROMPT: &str = "How do you want to name the ecosystem?"; +pub(super) const MSG_ECOSYSTEM_NAME_PROMPT: &str = "What do you want to name the ecosystem?"; pub(super) const MSG_REPOSITORY_ORIGIN_PROMPT: &str = "Select the origin of zksync-era repository"; pub(super) const MSG_LINK_TO_CODE_PROMPT: &str = "Where's the code located?"; pub(super) const MSG_L1_NETWORK_PROMPT: &str = "Select the L1 network"; pub(super) const MSG_START_CONTAINERS_PROMPT: &str = "Do you want to start containers after creating the ecosystem?"; pub(super) const MSG_CREATING_ECOSYSTEM: &str = "Creating ecosystem"; -pub(super) const MSG_CREATED_ECOSYSTEM: &str = "Ecosystem created successfully"; + +pub fn msg_created_ecosystem(name: &str) -> String { + format!("Ecosystem {name} created successfully (All subsequent commands should be executed from ecosystem folder `cd {name}`)") +} + pub(super) const MSG_CLONING_ERA_REPO_SPINNER: &str = "Cloning zksync-era repository..."; pub(super) const MSG_CREATING_INITIAL_CONFIGURATIONS_SPINNER: &str = "Creating initial configurations..."; @@ -30,10 +36,19 @@ pub(super) const MSG_ECOSYSTEM_ALREADY_EXISTS_ERR: &str = "Ecosystem already exi pub(super) const MSG_ECOSYSTEM_CONFIG_INVALID_ERR: &str = "Invalid ecosystem configuration"; pub(super) const MSG_LINK_TO_CODE_SELECTION_CLONE: &str = "Clone for me (recommended)"; pub(super) const MSG_LINK_TO_CODE_SELECTION_PATH: &str = "I have the code already"; +pub(super) const MSG_NOT_MAIN_REPO_OR_FORK_ERR: &str = + "It's not a zkSync Era main repository or fork"; +pub(super) const MSG_CONFIRM_STILL_USE_FOLDER: &str = "Do you still want to use this folder?"; + +pub(super) fn msg_path_to_zksync_does_not_exist_err(path: &str) -> String { + format!("Path to zkSync Era repo does not exist: {path:?}") +} /// Ecosystem and chain init related messages pub(super) const MSG_L1_RPC_URL_HELP: &str = "L1 RPC URL"; pub(super) const MSG_GENESIS_ARGS_HELP: &str = "Genesis options"; +pub(super) const MSG_DEV_ARG_HELP: &str = + "Deploy ecosystem using all defaults. Suitable for local development"; pub(super) const MSG_DEPLOY_ECOSYSTEM_PROMPT: &str = "Do you want to deploy ecosystem contracts? 
(Not needed if you already have an existing one)"; pub(super) const MSG_L1_RPC_URL_PROMPT: &str = "What is the RPC URL of the L1 network?"; @@ -43,7 +58,6 @@ pub(super) const MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT: &str = "Provide the path t pub(super) const MSG_L1_RPC_URL_INVALID_ERR: &str = "Invalid RPC URL"; pub(super) const MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR: &str = "Invalid path"; pub(super) const MSG_GENESIS_DATABASE_ERR: &str = "Unable to perform genesis on the database"; -pub(super) const MSG_CONTRACTS_CONFIG_NOT_FOUND_ERR: &str = "Ecosystem contracts config not found"; pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found"; pub(super) const MSG_INITIALIZING_ECOSYSTEM: &str = "Initializing ecosystem"; pub(super) const MSG_DEPLOYING_ERC20: &str = "Deploying ERC20 contracts"; @@ -55,6 +69,7 @@ pub(super) const MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER: &str = "Deploying ecosystem contracts..."; pub(super) const MSG_REGISTERING_CHAIN_SPINNER: &str = "Registering chain..."; pub(super) const MSG_ACCEPTING_ADMIN_SPINNER: &str = "Accepting admin..."; +pub(super) const MSG_RECREATE_ROCKS_DB_ERRROR: &str = "Failed to create rocks db path"; pub(super) fn msg_initializing_chain(chain_name: &str) -> String { format!("Initializing chain {chain_name}") @@ -65,7 +80,7 @@ pub(super) fn msg_ecosystem_initialized(chains: &str) -> String { } /// Ecosystem default related messages -pub(super) const MSG_DEFAULT_CHAIN_PROMPT: &str = "What chain you want to set as default?"; +pub(super) const MSG_DEFAULT_CHAIN_PROMPT: &str = "What chain do you want to set as default?"; /// Ecosystem config related messages pub(super) const MSG_SAVE_INITIAL_CONFIG_ATTENTION: &str = @@ -118,7 +133,7 @@ pub(super) const MSG_SERVER_DB_URL_HELP: &str = "Server database url without dat pub(super) const MSG_SERVER_DB_NAME_HELP: &str = "Server database name"; pub(super) const MSG_PROVER_DB_URL_HELP: &str = "Prover database url without database name"; pub(super) const MSG_PROVER_DB_NAME_HELP: &str = "Prover database name"; -pub(super) const MSG_GENESIS_USE_DEFAULT_HELP: &str = "Use default database urls and names"; +pub(super) const MSG_USE_DEFAULT_DATABASES_HELP: &str = "Use default database urls and names"; pub(super) const MSG_GENESIS_COMPLETED: &str = "Genesis completed successfully"; pub(super) const MSG_STARTING_GENESIS: &str = "Starting genesis process"; pub(super) const MSG_INITIALIZING_DATABASES_SPINNER: &str = "Initializing databases..."; @@ -133,6 +148,10 @@ pub(super) fn msg_server_db_url_prompt(chain_name: &str) -> String { format!("Please provide server database url for chain {chain_name}") } +pub(super) fn msg_external_node_db_url_prompt(chain_name: &str) -> String { + format!("Please provide external_node database url for chain {chain_name}") +} + pub(super) fn msg_prover_db_url_prompt(chain_name: &str) -> String { format!("Please provide prover database url for chain {chain_name}") } @@ -141,6 +160,10 @@ pub(super) fn msg_prover_db_name_prompt(chain_name: &str) -> String { format!("Please provide prover database name for chain {chain_name}") } +pub(super) fn msg_external_node_db_name_prompt(chain_name: &str) -> String { + format!("Please provide external_node database name for chain {chain_name}") +} + pub(super) fn msg_server_db_name_prompt(chain_name: &str) -> String { format!("Please provide server database name for chain {chain_name}") } @@ -156,6 +179,7 @@ pub(super) const MSG_SERVER_COMPONENTS_HELP: &str = "Components of server to run pub(super) const MSG_SERVER_GENESIS_HELP: &str = "Run 
server in genesis mode"; pub(super) const MSG_SERVER_ADDITIONAL_ARGS_HELP: &str = "Additional arguments that can be passed through the CLI"; +pub(super) const MSG_SERVER_BUILD_HELP: &str = "Build server but don't run it"; /// Accept ownership related messages pub(super) const MSG_ACCEPTING_GOVERNANCE_SPINNER: &str = "Accepting governance..."; @@ -172,7 +196,7 @@ pub(super) const MSG_FAILED_TO_FIND_ECOSYSTEM_ERR: &str = "Failed to find ecosys /// Server related messages pub(super) const MSG_STARTING_SERVER: &str = "Starting server"; pub(super) const MSG_FAILED_TO_RUN_SERVER_ERR: &str = "Failed to start server"; -pub(super) const MSG_BUILDING_L1_CONTRACTS: &str = "Building L1 contracts..."; +pub(super) const MSG_PREPARING_EN_CONFIGS: &str = "Preparing External Node config"; /// Forge utils related messages pub(super) const MSG_DEPLOYER_PK_NOT_SET_ERR: &str = "Deployer private key is not set"; @@ -189,6 +213,107 @@ pub(super) fn msg_address_doesnt_have_enough_money_prompt( ) } +pub(super) fn msg_preparing_en_config_is_done(path: &Path) -> String { + format!("External nodes configs could be found in: {path:?}") +} + +pub(super) const MSG_EXTERNAL_NODE_CONFIG_NOT_INITIALIZED: &str = + "External node is not initialized"; + +pub(super) const MSG_STARTING_EN: &str = "Starting external node"; + /// Prover related messages pub(super) const MSG_GENERATING_SK_SPINNER: &str = "Generating setup keys..."; pub(super) const MSG_SK_GENERATED: &str = "Setup keys generated successfully"; +pub(super) const MSG_MISSING_COMPONENT_ERR: &str = "Missing component"; +pub(super) const MSG_RUNNING_PROVER_GATEWAY: &str = "Running gateway"; +pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness generator"; +pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator"; +pub(super) const MSG_RUNNING_PROVER: &str = "Running prover"; +pub(super) const MSG_RUNNING_COMPRESSOR: &str = "Running compressor"; +pub(super) const MSG_RUN_COMPONENT_PROMPT: &str = "What component do you want to run?"; +pub(super) const MSG_RUNNING_PROVER_GATEWAY_ERR: &str = "Failed to run prover gateway"; +pub(super) const MSG_RUNNING_WITNESS_GENERATOR_ERR: &str = "Failed to run witness generator"; +pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR: &str = + "Failed to run witness vector generator"; +pub(super) const MSG_RUNNING_COMPRESSOR_ERR: &str = "Failed to run compressor"; +pub(super) const MSG_RUNNING_PROVER_ERR: &str = "Failed to run prover"; +pub(super) const MSG_PROOF_STORE_CONFIG_PROMPT: &str = + "Select where you would like to store the proofs"; +pub(super) const MSG_PROOF_STORE_DIR_PROMPT: &str = + "Provide the path where you would like to store the proofs:"; +pub(super) const MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_PROMPT: &str = + "Provide the base URL of the GCS bucket (e.g., gs://bucket-name):"; +pub(super) const MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_ERR: &str = + "Bucket base URL should start with gs://"; +pub(super) const MSG_PROOF_STORE_GCS_CREDENTIALS_FILE_PROMPT: &str = + "Provide the path to the GCS credentials file:"; +pub(super) const MSG_GENERAL_CONFIG_NOT_FOUND_ERR: &str = "General config not found"; +pub(super) const MSG_PROVER_CONFIG_NOT_FOUND_ERR: &str = "Prover config not found"; +pub(super) const MSG_PROVER_INITIALIZED: &str = "Prover has been initialized successfully"; +pub(super) const MSG_CREATE_GCS_BUCKET_PROMPT: &str = "Do you want to create a new GCS bucket?"; +pub(super) const MSG_CREATE_GCS_BUCKET_PROJECT_ID_PROMPT: &str = "Select the project ID:"; 
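Aside on the GCS prompts added above: the bucket base URL is expected to start with gs://, which is exactly what MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_ERR reports. A minimal sketch of the check those messages imply, using only the standard library; the validator name is illustrative and not part of this PR:

// Illustrative validator mirroring MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_ERR.
fn validate_gcs_bucket_base_url(url: &str) -> Result<(), &'static str> {
    if url.starts_with("gs://") {
        Ok(())
    } else {
        Err("Bucket base URL should start with gs://")
    }
}

fn main() {
    assert!(validate_gcs_bucket_base_url("gs://my-proof-bucket").is_ok());
    assert!(validate_gcs_bucket_base_url("https://my-proof-bucket").is_err());
}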
diff --git a/zk_toolbox/crates/zk_inception/src/server.rs b/zk_toolbox/crates/zk_inception/src/server.rs
deleted file mode 100644
index 6773d224cba3..000000000000
--- a/zk_toolbox/crates/zk_inception/src/server.rs
+++ /dev/null
@@ -1,95 +0,0 @@
-use std::path::PathBuf;
-
-use anyhow::Context;
-use common::cmd::Cmd;
-use config::{
-    traits::FileConfigWithDefaultName, ChainConfig, ContractsConfig, GeneralConfig, GenesisConfig,
-    SecretsConfig, WalletsConfig,
-};
-use xshell::{cmd, Shell};
-
-use crate::messages::MSG_FAILED_TO_RUN_SERVER_ERR;
-
-pub struct RunServer {
-    components: Option<Vec<String>>,
-    code_path: PathBuf,
-    wallets: PathBuf,
-    contracts: PathBuf,
-    general_config: PathBuf,
-    genesis: PathBuf,
-    secrets: PathBuf,
-}
-
-pub enum ServerMode {
-    Normal,
-    Genesis,
-}
-
-impl RunServer {
-    pub fn new(components: Option<Vec<String>>, chain_config: &ChainConfig) -> Self {
-        let wallets = WalletsConfig::get_path_with_base_path(&chain_config.configs);
-        let general_config = GeneralConfig::get_path_with_base_path(&chain_config.configs);
-        let genesis = GenesisConfig::get_path_with_base_path(&chain_config.configs);
-        let contracts = ContractsConfig::get_path_with_base_path(&chain_config.configs);
-        let secrets = SecretsConfig::get_path_with_base_path(&chain_config.configs);
-
-        Self {
-            components,
-            code_path: chain_config.link_to_code.clone(),
-            wallets,
-            contracts,
-            general_config,
-            genesis,
-            secrets,
-        }
-    }
-
-    pub fn run(&self, shell: &Shell, server_mode: ServerMode) -> anyhow::Result<()> {
-        shell.change_dir(&self.code_path);
-        let config_genesis = &self.genesis.to_str().unwrap();
-        let config_wallets = &self.wallets.to_str().unwrap();
-        let config_general_config = &self.general_config.to_str().unwrap();
-        let config_contracts = &self.contracts.to_str().unwrap();
-        let secrets = &self.secrets.to_str().unwrap();
-        let mut additional_args = vec![];
-        if let Some(components) = self.components() {
-            additional_args.push(format!("--components={}", components))
-        }
-        if let ServerMode::Genesis = server_mode {
-            additional_args.push("--genesis".to_string());
-        }
-
-        let mut cmd = Cmd::new(
-            cmd!(
-                shell,
-                "cargo run --release --bin zksync_server --
-                --genesis-path {config_genesis}
-                --wallets-path {config_wallets}
-                --config-path {config_general_config}
-                --secrets-path {secrets}
-                --contracts-config-path {config_contracts}
-                "
-            )
-            .args(additional_args)
-            .env_remove("RUSTUP_TOOLCHAIN"),
-        );
-
-        // If we are running server in normal mode
-        // we need to get the output to the console
-        if let ServerMode::Normal = server_mode {
-            cmd = cmd.with_force_run();
-        }
-
-        cmd.run().context(MSG_FAILED_TO_RUN_SERVER_ERR)?;
-        Ok(())
-    }
-
-    fn components(&self) -> Option<String> {
-        self.components.as_ref().and_then(|components| {
-            if components.is_empty() {
-                return None;
-            }
-            Some(components.join(","))
-        })
-    }
-}
diff --git a/zk_toolbox/crates/zk_inception/src/forge_utils.rs b/zk_toolbox/crates/zk_inception/src/utils/forge.rs
similarity index 100%
rename from zk_toolbox/crates/zk_inception/src/forge_utils.rs
rename to zk_toolbox/crates/zk_inception/src/utils/forge.rs
diff --git a/zk_toolbox/crates/zk_inception/src/utils/mod.rs b/zk_toolbox/crates/zk_inception/src/utils/mod.rs
new file mode 100644
index 000000000000..a84f0a336de5
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/utils/mod.rs
@@ -0,0 +1,2 @@
+pub mod forge;
+pub mod rocks_db;
diff --git a/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs b/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs
new file mode 100644
index 000000000000..fc80aca100bc
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs
@@ -0,0 +1,39 @@
+use std::path::Path;
+
+use config::RocksDbs;
+use xshell::Shell;
+
+use crate::defaults::{
+    EN_ROCKS_DB_PREFIX, MAIN_ROCKS_DB_PREFIX, ROCKS_DB_STATE_KEEPER, ROCKS_DB_TREE,
+};
+
+pub enum RocksDBDirOption {
+    Main,
+    ExternalNode,
+}
+
+impl RocksDBDirOption {
+    pub fn prefix(&self) -> &str {
+        match self {
+            RocksDBDirOption::Main => MAIN_ROCKS_DB_PREFIX,
+            RocksDBDirOption::ExternalNode => EN_ROCKS_DB_PREFIX,
+        }
+    }
+}
+
+pub fn recreate_rocksdb_dirs(
+    shell: &Shell,
+    rocks_db_path: &Path,
+    option: RocksDBDirOption,
+) -> anyhow::Result<RocksDbs> {
+    let state_keeper = rocks_db_path
+        .join(option.prefix())
+        .join(ROCKS_DB_STATE_KEEPER);
+    shell.remove_path(&state_keeper)?;
+    let merkle_tree = rocks_db_path.join(option.prefix()).join(ROCKS_DB_TREE);
+    shell.remove_path(&merkle_tree)?;
+    Ok(RocksDbs {
+        state_keeper: shell.create_dir(state_keeper)?,
+        merkle_tree: shell.create_dir(merkle_tree)?,
+    })
+}
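The recreate_rocksdb_dirs helper above is safe to call on a fresh chain as well as an existing one, since xshell's remove_path does not fail when the path is already absent. A minimal usage sketch, assuming the ChainConfig type from this PR with its rocks_db_path field; the wrapper function name is illustrative:

use xshell::Shell;

use crate::utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption};

// Sketch: reset the external node's RocksDB state before re-initializing it.
fn reset_en_rocks_dbs(chain_config: &config::ChainConfig) -> anyhow::Result<()> {
    let shell = Shell::new()?;
    let rocks_dbs = recreate_rocksdb_dirs(
        &shell,
        &chain_config.rocks_db_path,
        RocksDBDirOption::ExternalNode,
    )?;
    // Both paths now point at freshly created, empty directories.
    println!("state keeper: {:?}", rocks_dbs.state_keeper);
    println!("merkle tree:  {:?}", rocks_dbs.merkle_tree);
    Ok(())
}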
diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml
index 79d2bac74905..d8f5d7862a04 100644
--- a/zk_toolbox/crates/zk_supervisor/Cargo.toml
+++ b/zk_toolbox/crates/zk_supervisor/Cargo.toml
@@ -21,3 +21,4 @@ strum_macros.workspace = true
 tokio.workspace = true
 url.workspace = true
 xshell.workspace = true
+serde.workspace = true
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs
new file mode 100644
index 000000000000..0c5d2f52682a
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs
@@ -0,0 +1,73 @@
+use anyhow::Context;
+use clap::Subcommand;
+use common::{docker, logger};
+use config::{EcosystemConfig, DOCKER_COMPOSE_FILE};
+use xshell::Shell;
+
+use crate::messages::{
+    MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_CLEANED,
+    MSG_DOCKER_COMPOSE_DOWN, MSG_DOCKER_COMPOSE_REMOVE_VOLUMES,
+};
+
+#[derive(Subcommand, Debug)]
+pub enum CleanCommands {
+    All,
+    Containers,
+    ContractsCache,
+}
+
+pub fn run(shell: &Shell, args: CleanCommands) -> anyhow::Result<()> {
+    let ecosystem = EcosystemConfig::from_file(shell)?;
+    match args {
+        CleanCommands::All => {
+            containers(shell)?;
+            contracts(shell, &ecosystem)?;
+        }
+        CleanCommands::Containers => containers(shell)?,
+        CleanCommands::ContractsCache => contracts(shell, &ecosystem)?,
+    }
+    Ok(())
+}
+
+pub fn containers(shell: &Shell) -> anyhow::Result<()> {
+    logger::info(MSG_DOCKER_COMPOSE_DOWN);
+    docker::down(shell, DOCKER_COMPOSE_FILE)?;
+    logger::info(MSG_DOCKER_COMPOSE_REMOVE_VOLUMES);
+    shell.remove_path("volumes")?;
+    logger::info(MSG_DOCKER_COMPOSE_CLEANED);
+    Ok(())
+}
+
+pub fn contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> {
+    let path_to_foundry = ecosystem_config.path_to_foundry();
+    logger::info(MSG_CONTRACTS_CLEANING);
+    shell
+        .remove_path(path_to_foundry.join("artifacts"))
+        .context("artifacts")?;
+    shell
+        .remove_path(path_to_foundry.join("cache"))
+        .context("cache")?;
+    shell
+        .remove_path(path_to_foundry.join("cache-forge"))
+        .context("cache-forge")?;
+    shell
+        .remove_path(path_to_foundry.join("out"))
+        .context("out")?;
+    shell
+        .remove_path(path_to_foundry.join("typechain"))
+        .context("typechain")?;
+    shell
+        .remove_path(path_to_foundry.join("script-config"))
+        .context("remove script-config")?;
+    shell
+        .create_dir(path_to_foundry.join("script-config"))
+        .context("create script-config")?;
+    shell
+        .remove_path(path_to_foundry.join("script-out"))
+        .context("remove script-out")?;
+    shell
+        .create_dir(path_to_foundry.join("script-out"))
+        .context("create script-out")?;
+    logger::info(MSG_CONTRACTS_CLEANING_FINISHED);
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
index 98d4cdfe990d..b2c6df6a4864 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
@@ -1,2 +1,3 @@
+pub mod clean;
 pub mod database;
-pub mod integration_tests;
+pub mod test;
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs
new file mode 100644
index 000000000000..a41ccf3d48df
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs
@@ -0,0 +1,10 @@
+use clap::Parser;
+use serde::{Deserialize, Serialize};
+
+use crate::messages::MSG_TESTS_EXTERNAL_NODE_HELP;
+
+#[derive(Debug, Serialize, Deserialize, Parser)]
+pub struct IntegrationArgs {
+    #[clap(short, long, help = MSG_TESTS_EXTERNAL_NODE_HELP)]
+    pub external_node: bool,
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs
new file mode 100644
index 000000000000..6a00b2152bdd
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs
@@ -0,0 +1,2 @@
+pub mod integration;
+pub mod revert;
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs
new file mode 100644
index 000000000000..e4305b6796c2
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs
@@ -0,0 +1,11 @@
+use clap::Parser;
+
+use crate::messages::{MSG_REVERT_TEST_ENABLE_CONSENSUS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP};
+
+#[derive(Debug, Parser)]
+pub struct RevertArgs {
+    #[clap(long, help = MSG_REVERT_TEST_ENABLE_CONSENSUS_HELP)]
+    pub enable_consensus: bool,
+    #[clap(short, long, help = MSG_TESTS_EXTERNAL_NODE_HELP)]
+    pub external_node: bool,
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
similarity index 61%
rename from zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs
rename to zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
index c5b1229dd2ce..f44559fe4e07 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/integration_tests.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
@@ -1,30 +1,39 @@
-use common::{cmd::Cmd, logger, spinner::Spinner};
+use common::{cmd::Cmd, config::global_config, logger, spinner::Spinner};
 use config::EcosystemConfig;
 use xshell::{cmd, Shell};

+use super::args::integration::IntegrationArgs;
 use crate::messages::{
-    MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS, MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES,
-    MSG_INTEGRATION_TESTS_RUN_INFO, MSG_INTEGRATION_TESTS_RUN_SUCCESS,
+    msg_integration_tests_run, MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS,
+    MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES, MSG_INTEGRATION_TESTS_RUN_SUCCESS,
 };

 const TS_INTEGRATION_PATH: &str = "core/tests/ts-integration";
 const CONTRACTS_TEST_DATA_PATH: &str = "etc/contracts-test-data";

-pub fn run(shell: &Shell) -> anyhow::Result<()> {
+pub fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     shell.change_dir(ecosystem_config.link_to_code.join(TS_INTEGRATION_PATH));

-    logger::info(MSG_INTEGRATION_TESTS_RUN_INFO);
+    logger::info(msg_integration_tests_run(args.external_node));

     build_repository(shell, &ecosystem_config)?;
     build_test_contracts(shell, &ecosystem_config)?;

-    Cmd::new(
-        cmd!(shell, "yarn jest --forceExit --testTimeout 60000")
-            .env("CHAIN_NAME", ecosystem_config.default_chain),
-    )
-    .with_force_run()
-    .run()?;
+    let mut command = cmd!(shell, "yarn jest --forceExit --testTimeout 60000")
+        .env("CHAIN_NAME", ecosystem_config.default_chain);
+
+    if args.external_node {
+        command = command.env("EXTERNAL_NODE", format!("{:?}", args.external_node))
+    }
+    if global_config().verbose {
+        command = command.env(
+            "ZKSYNC_DEBUG_LOGS",
+            format!("{:?}", global_config().verbose),
+        )
+    }
+
+    Cmd::new(command).with_force_run().run()?;

     logger::outro(MSG_INTEGRATION_TESTS_RUN_SUCCESS);
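One detail of the integration-runner change above worth spelling out: xshell's Cmd::env takes the command by value and returns it, so environment variables can be layered conditionally before the command is handed to the common Cmd wrapper. A self-contained sketch of that pattern, using only xshell (the function name is illustrative, not from this PR):

use xshell::{cmd, Shell};

// Conditionally layer env vars onto a command, as integration.rs does above.
fn yarn_jest(shell: &Shell, chain: &str, external_node: bool) -> xshell::Cmd<'_> {
    let mut command = cmd!(shell, "yarn jest --forceExit --testTimeout 60000")
        .env("CHAIN_NAME", chain);
    if external_node {
        // Debug-formatting a bool prints "true"/"false", matching the diff above.
        command = command.env("EXTERNAL_NODE", format!("{:?}", external_node));
    }
    command
}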
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs
new file mode 100644
index 000000000000..857190dba3b0
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs
@@ -0,0 +1,24 @@
+use args::{integration::IntegrationArgs, revert::RevertArgs};
+use clap::Subcommand;
+use xshell::Shell;
+
+use crate::messages::{MSG_INTEGRATION_TESTS_ABOUT, MSG_REVERT_TEST_ABOUT};
+
+mod args;
+mod integration;
+mod revert;
+
+#[derive(Subcommand, Debug)]
+pub enum TestCommands {
+    #[clap(about = MSG_INTEGRATION_TESTS_ABOUT, alias = "i")]
+    Integration(IntegrationArgs),
+    #[clap(about = MSG_REVERT_TEST_ABOUT, alias = "r")]
+    Revert(RevertArgs),
+}
+
+pub fn run(shell: &Shell, args: TestCommands) -> anyhow::Result<()> {
+    match args {
+        TestCommands::Integration(args) => integration::run(shell, args),
+        TestCommands::Revert(args) => revert::run(shell, args),
+    }
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs
new file mode 100644
index 000000000000..eead83303eed
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs
@@ -0,0 +1,58 @@
+use common::{cmd::Cmd, logger, spinner::Spinner};
+use config::EcosystemConfig;
+use xshell::{cmd, Shell};
+
+use super::args::revert::RevertArgs;
+use crate::messages::{
+    msg_revert_tests_run, MSG_REVERT_TEST_INSTALLING_DEPENDENCIES, MSG_REVERT_TEST_RUN_INFO,
+    MSG_REVERT_TEST_RUN_SUCCESS,
+};
+
+const REVERT_TESTS_PATH: &str = "core/tests/revert-test";
+
+pub fn run(shell: &Shell, args: RevertArgs) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    shell.change_dir(ecosystem_config.link_to_code.join(REVERT_TESTS_PATH));
+
+    logger::info(MSG_REVERT_TEST_RUN_INFO);
+    install_and_build_dependencies(shell, &ecosystem_config)?;
+    run_test(shell, &args, &ecosystem_config)?;
+    logger::outro(MSG_REVERT_TEST_RUN_SUCCESS);
+
+    Ok(())
+}
+
+fn install_and_build_dependencies(
+    shell: &Shell,
+    ecosystem_config: &EcosystemConfig,
+) -> anyhow::Result<()> {
+    let _dir_guard = shell.push_dir(&ecosystem_config.link_to_code);
+    let spinner = Spinner::new(MSG_REVERT_TEST_INSTALLING_DEPENDENCIES);
+    Cmd::new(cmd!(shell, "yarn install")).run()?;
+    Cmd::new(cmd!(shell, "yarn utils build")).run()?;
+
+    spinner.finish();
+    Ok(())
+}
+
+fn run_test(
+    shell: &Shell,
+    args: &RevertArgs,
+    ecosystem_config: &EcosystemConfig,
+) -> anyhow::Result<()> {
+    Spinner::new(&msg_revert_tests_run(args.external_node)).freeze();
+
+    let cmd = if args.external_node {
+        cmd!(shell, "yarn mocha tests/revert-and-restart-en.test.ts")
+    } else {
+        cmd!(shell, "yarn mocha tests/revert-and-restart.test.ts")
+    };
+
+    let mut cmd = Cmd::new(cmd).env("CHAIN_NAME", &ecosystem_config.default_chain);
+    if args.enable_consensus {
+        cmd = cmd.env("ENABLE_CONSENSUS", "true");
+    }
+    cmd.with_force_run().run()?;
+
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zk_toolbox/crates/zk_supervisor/src/dals.rs
index f2f6f86cfc61..854a6b979494 100644
--- a/zk_toolbox/crates/zk_supervisor/src/dals.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/dals.rs
@@ -1,13 +1,13 @@
-use anyhow::anyhow;
+use anyhow::{anyhow, Context};
 use common::config::global_config;
 use config::{EcosystemConfig, SecretsConfig};
 use url::Url;
 use xshell::Shell;

-use crate::messages::MSG_CHAIN_NOT_FOUND_ERR;
+use crate::messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_PROVER_URL_MUST_BE_PRESENTED};

 const CORE_DAL_PATH: &str = "core/lib/dal";
-const PROVER_DAL_PATH: &str = "prover/prover_dal";
+const PROVER_DAL_PATH: &str = "prover/crates/lib/prover_dal";

 #[derive(Debug, Clone)]
 pub struct SelectedDals {
@@ -46,7 +46,11 @@ pub fn get_prover_dal(shell: &Shell) -> anyhow::Result<Dal> {
     Ok(Dal {
         path: PROVER_DAL_PATH.to_string(),
-        url: secrets.database.prover_url.clone(),
+        url: secrets
+            .database
+            .prover_url
+            .context(MSG_PROVER_URL_MUST_BE_PRESENTED)?
+            .clone(),
     })
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs
index ab5629465a88..d6cc82c0994d 100644
--- a/zk_toolbox/crates/zk_supervisor/src/main.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/main.rs
@@ -1,17 +1,20 @@
 use clap::{Parser, Subcommand};
-use commands::database::DatabaseCommands;
+use commands::{database::DatabaseCommands, test::TestCommands};
 use common::{
-    check_prerequisites,
+    check_general_prerequisites,
     config::{global_config, init_global_config, GlobalConfig},
+    error::log_error,
     init_prompt_theme, logger,
 };
 use config::EcosystemConfig;
 use messages::{
-    msg_global_chain_does_not_exist, MSG_SUBCOMMAND_DATABASE_ABOUT,
-    MSG_SUBCOMMAND_INTEGRATION_TESTS_ABOUT,
+    msg_global_chain_does_not_exist, MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT,
+    MSG_SUBCOMMAND_TESTS_ABOUT,
 };
 use xshell::Shell;

+use crate::commands::clean::CleanCommands;
+
 mod commands;
 mod dals;
 mod messages;
@@ -27,10 +30,12 @@ struct Supervisor {
 #[derive(Subcommand, Debug)]
 enum SupervisorSubcommands {
-    #[command(subcommand, about = MSG_SUBCOMMAND_DATABASE_ABOUT)]
+    #[command(subcommand, about = MSG_SUBCOMMAND_DATABASE_ABOUT, alias = "db")]
     Database(DatabaseCommands),
-    #[command(about = MSG_SUBCOMMAND_INTEGRATION_TESTS_ABOUT)]
-    IntegrationTests,
+    #[command(subcommand, about = MSG_SUBCOMMAND_TESTS_ABOUT, alias = "t")]
+    Test(TestCommands),
+    #[command(subcommand, about = MSG_SUBCOMMAND_CLEAN)]
+    Clean(CleanCommands),
 }

 #[derive(Parser, Debug)]
@@ -62,27 +67,13 @@ async fn main() -> anyhow::Result<()> {
     init_global_config_inner(&shell, &args.global)?;

     if !global_config().ignore_prerequisites {
-        check_prerequisites(&shell);
+        check_general_prerequisites(&shell);
     }

     match run_subcommand(args, &shell).await {
         Ok(_) => {}
-        Err(e) => {
-            logger::error(e.to_string());
-
-            if e.chain().count() > 1 {
-                logger::error_note(
-                    "Caused by:",
-                    &e.chain()
-                        .skip(1)
-                        .enumerate()
-                        .map(|(i, cause)| format!(" {i}: {}", cause))
-                        .collect::<Vec<_>>()
-                        .join("\n"),
-                );
-            }
-
-            logger::outro("Failed");
+        Err(error) => {
+            log_error(error);
             std::process::exit(1);
         }
     }
@@ -93,7 +84,8 @@ async fn main() -> anyhow::Result<()> {
 async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> {
     match args.command {
         SupervisorSubcommands::Database(command) => commands::database::run(shell, command).await?,
-        SupervisorSubcommands::IntegrationTests => commands::integration_tests::run(shell)?,
+        SupervisorSubcommands::Test(command) => commands::test::run(shell, command)?,
+        SupervisorSubcommands::Clean(command) => commands::clean::run(shell, command)?,
     }
     Ok(())
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs
index 31bdb0eb9b1d..863f1c4b1aef 100644
--- a/zk_toolbox/crates/zk_supervisor/src/messages.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs
@@ -1,21 +1,26 @@
 // Ecosystem related messages
 pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found";
+
 pub(super) fn msg_global_chain_does_not_exist(chain: &str, available_chains: &str) -> String {
     format!("Chain with name {chain} doesnt exist, please choose one of: {available_chains}")
 }

 // Subcommands help
 pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related commands";
-pub(super) const MSG_SUBCOMMAND_INTEGRATION_TESTS_ABOUT: &str = "Run integration tests";
+pub(super) const MSG_SUBCOMMAND_TESTS_ABOUT: &str = "Run tests";
+pub(super) const MSG_SUBCOMMAND_CLEAN: &str = "Clean artifacts";

 // Database related messages
 pub(super) const MSG_NO_DATABASES_SELECTED: &str = "No databases selected";
+
 pub(super) fn msg_database_info(gerund_verb: &str) -> String {
     format!("{gerund_verb} databases")
 }
+
 pub(super) fn msg_database_success(past_verb: &str) -> String {
     format!("Databases {past_verb} successfully")
 }
+
 pub(super) fn msg_database_loading(gerund_verb: &str, dal: &str) -> String {
     format!("{gerund_verb} database for dal {dal}...")
 }
@@ -33,6 +38,8 @@ pub(super) const MSG_DATABASE_RESET_PAST: &str = "reset";
 pub(super) const MSG_DATABASE_SETUP_GERUND: &str = "Setting up";
 pub(super) const MSG_DATABASE_SETUP_PAST: &str = "set up";

+pub(super) const MSG_PROVER_URL_MUST_BE_PRESENTED: &str = "Prover url must be presented";
+
 pub(super) const MSG_DATABASE_COMMON_PROVER_HELP: &str = "Prover database";
 pub(super) const MSG_DATABASE_COMMON_CORE_HELP: &str = "Core database";
 pub(super) const MSG_DATABASE_NEW_MIGRATION_DATABASE_HELP: &str =
@@ -57,14 +64,54 @@ pub(super) const MSG_DATABASE_NEW_MIGRATION_DB_PROMPT: &str =
     "What database do you want to create a new migration for?";
 pub(super) const MSG_DATABASE_NEW_MIGRATION_NAME_PROMPT: &str =
     "How do you want to name the migration?";
+
 pub(super) fn msg_database_new_migration_loading(dal: &str) -> String {
     format!("Creating new database migration for dal {}...", dal)
 }
+
 pub(super) const MSG_DATABASE_NEW_MIGRATION_SUCCESS: &str = "Migration created successfully";

+// Tests related messages
+pub(super) const MSG_INTEGRATION_TESTS_ABOUT: &str = "Run integration tests";
+pub(super) const MSG_REVERT_TEST_ABOUT: &str = "Run revert tests";
+pub(super) const MSG_TESTS_EXTERNAL_NODE_HELP: &str = "Run tests for external node";
+
 // Integration tests related messages
-pub(super) const MSG_INTEGRATION_TESTS_RUN_INFO: &str = "Running integration tests";
+pub(super) fn msg_integration_tests_run(external_node: bool) -> String {
+    let base = "Running integration tests";
+    if external_node {
+        format!("{} for external node", base)
+    } else {
+        format!("{} for main server", base)
+    }
+}
+
 pub(super) const MSG_INTEGRATION_TESTS_RUN_SUCCESS: &str = "Integration tests ran successfully";
 pub(super) const MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES: &str =
     "Building repository dependencies...";
 pub(super) const MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS: &str = "Building test contracts...";
+
+// Revert tests related messages
+pub(super) const MSG_REVERT_TEST_ENABLE_CONSENSUS_HELP: &str = "Enable consensus";
+pub(super) const MSG_REVERT_TEST_INSTALLING_DEPENDENCIES: &str =
+    "Building and installing dependencies. This process may take a lot of time...";
+pub(super) const MSG_REVERT_TEST_RUN_INFO: &str = "Running revert and restart test";
+pub(super) fn msg_revert_tests_run(external_node: bool) -> String {
+    let base = "Running integration tests";
+    if external_node {
+        format!("{} for external node", base)
+    } else {
+        format!("{} for main server", base)
+    }
+}
+
+pub(super) const MSG_REVERT_TEST_RUN_SUCCESS: &str = "Revert and restart test ran successfully";
+
+// Cleaning related messages
+pub(super) const MSG_DOCKER_COMPOSE_DOWN: &str = "docker compose down";
+pub(super) const MSG_DOCKER_COMPOSE_REMOVE_VOLUMES: &str = "docker compose remove volumes";
+pub(super) const MSG_DOCKER_COMPOSE_CLEANED: &str = "docker compose network cleaned";
+pub(super) const MSG_CONTRACTS_CLEANING: &str =
+    "Removing contracts building and deployment artifacts";
+pub(super) const MSG_CONTRACTS_CLEANING_FINISHED: &str =
+    "Contracts building and deployment artifacts are cleaned up";
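Closing note on the ergonomics work in this PR: the short subcommand aliases (e, c, p, en, up for zk_inception; db, t for zk_supervisor) rely on clap's derive-level alias attribute. A self-contained sketch of that mechanism, assuming clap 4 with the derive feature; the CLI and variant names below are illustrative, not taken from this PR:

use clap::{Parser, Subcommand};

#[derive(Parser, Debug)]
struct Cli {
    #[command(subcommand)]
    command: Commands,
}

#[derive(Subcommand, Debug)]
enum Commands {
    /// Run tests (also reachable via the `t` alias)
    #[command(alias = "t")]
    Test,
}

fn main() {
    // Both `mycli test` and `mycli t` parse to Commands::Test.
    let cli = Cli::parse_from(["mycli", "t"]);
    assert!(matches!(cli.command, Commands::Test));
}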