From adfddfc1c81ab05072282e96df96d43280d82c7e Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Fri, 29 Sep 2023 15:53:50 +0200 Subject: [PATCH 01/18] chore: Adds release workflows (#106) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Adds staging release workflows. Adds post-merge prometheus statistic. Moves from unsupported actions-rs/toolchain to actions-rust-lang/setup-rust-toolchain in ci-prover-reusable.yml. Adds devops as CODEOWNERS for Github workflows ## Why ❔ To create staging releases from this repo. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/build-docker-from-tag.yml | 70 +++++++++++++ .../workflows/build-external-node-docker.yml | 51 ++++++++++ .github/workflows/build-gar-reusable.yml | 81 ++++++++++++++++ .github/workflows/build-local-node-docker.yml | 51 ++++++++++ .../workflows/build-prover-fri-gpu-gar.yml | 46 +++++++++ .github/workflows/ci-prover-reusable.yml | 2 +- .github/workflows/ci.yml | 17 ---- .github/workflows/release-test-stage.yml | 97 +++++++++++++++++++ .github/workflows/vm-perf-to-prometheus.yml | 39 ++++++++ CODEOWNERS | 1 + 10 files changed, 437 insertions(+), 18 deletions(-) create mode 100644 .github/workflows/build-docker-from-tag.yml create mode 100644 .github/workflows/build-external-node-docker.yml create mode 100644 .github/workflows/build-gar-reusable.yml create mode 100644 .github/workflows/build-local-node-docker.yml create mode 100644 .github/workflows/build-prover-fri-gpu-gar.yml create mode 100644 .github/workflows/release-test-stage.yml create mode 100644 .github/workflows/vm-perf-to-prometheus.yml diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml new file mode 100644 index 000000000000..95f6e5998bc1 --- /dev/null +++ b/.github/workflows/build-docker-from-tag.yml @@ -0,0 +1,70 @@ +name: Build Image from tag +on: + push: + tags: + - core-v** + - prover-v** + +concurrency: docker-build + +jobs: + setup: + name: Setup + runs-on: [k8s, stage] + outputs: + image_tag_suffix: ${{ steps.set.outputs.image_tag_suffix }} + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 + with: + submodules: "recursive" + + - name: Generate output with git tag + id: set + run: | + git_tag="${GITHUB_REF#refs/*/}" + version=$(cut -d "-" -f2 <<< ${git_tag}) + echo "image_tag_suffix=${version}" >> $GITHUB_OUTPUT + + build-push-core-images: + name: Build and push image + needs: [setup] + uses: ./.github/workflows/build-core-template.yml + if: contains(github.ref_name, 'core') + secrets: + DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + with: + image_tag: ${{ needs.setup.outputs.image_tag }} + image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} + + build-push-prover-images: + name: Build and push image + needs: [setup] + uses: ./.github/workflows/build-prover-template.yml + if: contains(github.ref_name, 'prover') + with: + image_tag: ${{ needs.setup.outputs.image_tag }} + image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} + ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} + secrets: + DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + + build-gar-prover: + name: Build 
GAR prover + needs: [setup, build-push-prover-images] + uses: ./.github/workflows/build-gar-reusable.yml + if: contains(github.ref_name, 'prover') + with: + setup_keys_id: 4989f12 + image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} + push_asia: true + + build-gar-prover-fri-gpu: + name: Build GAR prover FRI GPU + needs: [setup, build-push-prover-images] + uses: ./.github/workflows/build-prover-fri-gpu-gar.yml + if: contains(github.ref_name, 'prover') + with: + setup_keys_id: 2d33a27-gpu + image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} diff --git a/.github/workflows/build-external-node-docker.yml b/.github/workflows/build-external-node-docker.yml new file mode 100644 index 000000000000..31ec1e81c093 --- /dev/null +++ b/.github/workflows/build-external-node-docker.yml @@ -0,0 +1,51 @@ +name: External Node - Build & push docker image +on: + workflow_dispatch: + inputs: + image_tag: + description: "Tag of a built image to deploy (latest2.0 by default)" + type: string + required: false + default: "latest2.0" + +jobs: + build-images: + name: External Node - Build and Push Docker Image + runs-on: [self-hosted, ci-runner] + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: start-services + run: | + docker-compose -f docker-compose-runner.yml up -d zk geth postgres + + - name: init + run: | + ci_run git config --global --add safe.directory /usr/src/zksync + ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen + ci_run git config --global --add safe.directory /usr/src/zksync/etc/system-contracts + ci_run git config --global --add safe.directory /usr/src/zksync/contracts + + ci_run zk + ci_run zk run yarn + ci_run cp etc/tokens/{test,localhost}.json + ci_run zk compiler all + ci_run zk contract build + ci_run zk f yarn run l2-contracts build + + - name: update-image + run: | + ci_run docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + ci_run zk docker build server-v2 + ci_run gcloud auth configure-docker us-docker.pkg.dev -q + ci_run zk docker push external-node --custom-tag ${{ inputs.image_tag }} diff --git a/.github/workflows/build-gar-reusable.yml b/.github/workflows/build-gar-reusable.yml new file mode 100644 index 000000000000..745ff6666e13 --- /dev/null +++ b/.github/workflows/build-gar-reusable.yml @@ -0,0 +1,81 @@ +name: Workflow template for Build Prover builtin Setup Keys + +on: + workflow_call: + inputs: + image_tag_suffix: + description: "Commit sha or git tag for Docker tag" + required: true + type: string + setup_keys_id: + description: "Commit sha for downloading keys from bucket dir" + required: true + type: string + push_asia: + description: "Push images to Asia GAR" + required: false + default: false + type: boolean + +jobs: + build-gar-prover: + name: Build GAR prover + runs-on: [self-hosted, ci-runner] + strategy: + fail-fast: false + matrix: + setup_keys: + [ + { prover_id: "0", keys_ids: "0,18" }, + { prover_id: "1", keys_ids: "1,4" }, + { prover_id: "2", keys_ids: "2,5" }, + { prover_id: "3", keys_ids: "6,7" }, + { prover_id: "4", keys_ids: "8,9" }, + { prover_id: "5", keys_ids: "10,11" }, + { prover_id: "6", keys_ids: "12,13" }, + { prover_id: "7", keys_ids: "14,15" }, + { prover_id: "8", keys_ids: "16,17" }, + { prover_id: "9", keys_ids: "3" }, 
+ ] + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 + with: + submodules: "recursive" + + - name: Download Setup Keys + run: | + gsutil cp gs://matterlabs-setup-keys-us/setup-keys/setup_2\^26.key docker/prover-gar/setup_2\^26.key + IFS=', ' read -r -a keys_ids <<< "${{ matrix.setup_keys.keys_ids }}" + printf "%s\n" "${keys_ids[@]}"| xargs -n 1 -P 8 -I {} gsutil cp -P gs://matterlabs-zksync-v2-infra-blob-store/prover_setup_keys/${{ inputs.setup_keys_id }}/setup_{}_key.bin docker/prover-gar/ + + - name: Login to us-central1 GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build and push + uses: docker/build-push-action@v4 + with: + context: docker/prover-gar + build-args: | + PROVER_IMAGE=${{ inputs.image_tag_suffix }} + push: true + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-v2-gar:2.0-${{ inputs.image_tag_suffix }}-prover-${{ matrix.setup_keys.prover_id }}-${{ inputs.setup_keys_id }} + + - name: Login to asia-southeast1 GAR + if: "${{ inputs.push_asia }}" + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev + + - name: Push image to Asia + if: "${{ inputs.push_asia }}" + run: | + docker buildx imagetools create \ + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-v2-gar:2.0-${{ inputs.image_tag_suffix }}-prover-${{ matrix.setup_keys.prover_id }}-${{ inputs.setup_keys_id }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-v2-gar:2.0-${{ inputs.image_tag_suffix }}-prover-${{ matrix.setup_keys.prover_id }}-${{ inputs.setup_keys_id }} diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml new file mode 100644 index 000000000000..5f4cfa4547eb --- /dev/null +++ b/.github/workflows/build-local-node-docker.yml @@ -0,0 +1,51 @@ +name: Local Node - Build docker image +on: + workflow_dispatch: + inputs: + image_tag: + description: "Tag of a built image to deploy (latest2.0 by default)" + type: string + required: false + default: "latest2.0" + +jobs: + build-images: + name: Local Node - Build and Push Docker Image + runs-on: [self-hosted, ci-runner] + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: start-services + run: | + docker-compose -f docker-compose-runner.yml up -d zk geth postgres + + - name: init + run: | + ci_run git config --global --add safe.directory /usr/src/zksync + ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen + ci_run git config --global --add safe.directory /usr/src/zksync/etc/system-contracts + ci_run git config --global --add safe.directory /usr/src/zksync/contracts + + ci_run zk + ci_run zk run yarn + ci_run cp etc/tokens/{test,localhost}.json + ci_run zk compiler all + ci_run zk contract build + ci_run zk f yarn run l2-contracts build + + - name: update-image + run: | 
+ ci_run docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + ci_run zk docker build server-v2 + ci_run gcloud auth configure-docker us-docker.pkg.dev -q + ci_run zk docker push local-node --custom-tag ${{ inputs.image_tag }} diff --git a/.github/workflows/build-prover-fri-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar.yml new file mode 100644 index 000000000000..61304a5eb2c9 --- /dev/null +++ b/.github/workflows/build-prover-fri-gpu-gar.yml @@ -0,0 +1,46 @@ +name: Build Prover FRI GPU with builtin setup data + +on: + workflow_call: + inputs: + image_tag_suffix: + description: "Commit sha or git tag for Docker tag" + required: true + type: string + setup_keys_id: + description: "Commit sha for downloading setup data from bucket dir" + required: true + type: string + +jobs: + build-gar-prover-fri-gpu: + name: Build prover FRI GPU GAR + runs-on: [self-hosted, ci-runner] + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 + with: + submodules: "recursive" + + - name: Download Setup data + run: | + gsutil -m rsync -r gs://matterlabs-zksync-v2-infra-blob-store/prover_setup_data/${{ inputs.setup_keys_id }} docker/prover-gpu-fri-gar + + - name: Login to us-central1 GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build and push + uses: docker/build-push-action@v4 + with: + context: docker/prover-gpu-fri-gar + build-args: | + PROVER_IMAGE=${{ inputs.image_tag_suffix }} + push: true + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.image_tag_suffix }} diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 426855cdaa21..e51ec94a81c3 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -11,7 +11,7 @@ jobs: run: | sudo apt update && sudo apt install -y \ pkg-config libclang-dev build-essential lldb lld \ - clang openssl libssl-dev gcc g++ pkg-config libclang-dev curl wget + clang openssl libssl-dev gcc g++ pkg-config libclang-dev curl wget - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 633680bf2d62..091941a66aa9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -94,20 +94,3 @@ jobs: echo "Intentionally failing to block PR from merging" exit 1 fi - - notify: - if: always() && !cancelled() - name: Notify on failures - runs-on: ubuntu-latest - needs: [ci-for-core, ci-for-prover, build-core-images, build-prover-images] - steps: - - uses: technote-space/workflow-conclusion-action@45ce8e0eb155657ab8ccf346ade734257fd196a5 # v3 - - name: Notify to Mattermost (on incidents) - uses: tferreira/matterfy@releases/v1 - if: ${{ env.WORKFLOW_CONCLUSION == 'failure' }} - with: - type: failure - job_name: "*CI suites for PR #${{ github.event.pull_request.number }} failed*" - icon_emoji: octocat - channel: "ci-notifications" - url: ${{ secrets.MATTERMOST_WEBHOOK }} diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml new file mode 100644 index 000000000000..36021030135e --- /dev/null +++ 
b/.github/workflows/release-test-stage.yml @@ -0,0 +1,97 @@ +name: Build and release Stage +on: + push: + branches: + - main + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + changed_files: + runs-on: [matterlabs-default-infra-runners] + name: Test changed-files + outputs: + core: ${{ steps.changed-files-yaml.outputs.core_any_changed }} + prover: ${{ steps.changed-files-yaml.outputs.prover_any_changed }} + all: ${{ steps.changed-files-yaml.outputs.all_any_changed }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 2 + submodules: "recursive" + + - name: Get all test, doc and src files that have changed + id: changed-files-yaml + uses: tj-actions/changed-files@v37 + with: + files_yaml: | + # If you want to exclude some files, please adjust here. + prover: + - prover/** + core: + - core/** + all: + - '!core/**' + - '!prover/**' + setup: + name: Setup + runs-on: [self-hosted, k8s, stage] + outputs: + image_tag_suffix: ${{ steps.generate-tag-suffix.outputs.image_tag_suffix }} + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 + with: + submodules: "recursive" + + - name: Generate image tag suffix + id: generate-tag-suffix + run: | + sha=$(git rev-parse --short HEAD) + ts=$(date +%s%N | cut -b1-13) + echo "image_tag_suffix=${sha}-${ts}" >> $GITHUB_OUTPUT + + build-push-core-images: + name: Build and push images + needs: [setup, changed_files] + uses: ./.github/workflows/build-core-template.yml + if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' + with: + image_tag: ${{ needs.setup.outputs.image_tag }} + image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} + secrets: + DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + + build-push-prover-images: + name: Build and push images + needs: [setup, changed_files] + uses: ./.github/workflows/build-prover-template.yml + if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' + with: + image_tag: ${{ needs.setup.outputs.image_tag }} + image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} + ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} + secrets: + DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + + build-gar-prover: + name: Build GAR prover + needs: [setup, build-push-core-images, build-push-prover-images] + uses: ./.github/workflows/build-gar-reusable.yml + if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' + with: + setup_keys_id: 4989f12 + image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} + push_asia: false + + build-gar-prover-fri-gpu: + name: Build GAR prover FRI GPU + needs: [setup, build-push-prover-images] + uses: ./.github/workflows/build-prover-fri-gpu-gar.yml + if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' + with: + setup_keys_id: 2d33a27-gpu + image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml new file mode 100644 index 000000000000..c5fe27632736 --- /dev/null +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -0,0 +1,39 @@ +name: Push main branch VM benchmarks to Prometheus + +on: + push: + branches: + - main + workflow_dispatch: + +# Disable simultaneous deployments into a single 
environment
+concurrency: vm-benchmarks
+
+jobs:
+  vm-benchmarks:
+    name: Run VM benchmarks
+    runs-on: [self-hosted, ci-runner]
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: "recursive"
+
+      - name: setup-env
+        run: |
+          echo PUSH_VM_BENCHMARKS_TO_PROMETHEUS=1 >> .env
+
+          echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
+          echo $(pwd)/bin >> $GITHUB_PATH
+
+      - name: init
+        run: |
+          docker-compose -f docker-compose-runner.yml up -d zk
+          ci_run zk
+          ci_run zk compiler system-contracts
+
+      - name: run benchmarks
+        run: |
+          ci_run cargo bench --package vm-benchmark --bench diy_benchmark
+          ci_run cargo bench --package vm-benchmark --bench iai | tee iai-result
+          ci_run cargo run --package vm-benchmark --bin iai_results_to_prometheus --release < iai-result
diff --git a/CODEOWNERS b/CODEOWNERS
index 12cd26187090..981a2db39116 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -2,3 +2,4 @@
 .github/release-please/** @RomanBrodetski @perekopskiy @Deniallugo @popzxc
 **/CHANGELOG.md @RomanBrodetski @perekopskiy @Deniallugo @popzxc
 CODEOWNERS @RomanBrodetski @perekopskiy @Deniallugo @popzxc
+.github/workflows/** @matter-labs/devops

From d8ab84304d2b60dff423dde9f0c46c69ec272e9c Mon Sep 17 00:00:00 2001
From: Shahar Kaminsky
Date: Fri, 29 Sep 2023 17:34:12 +0300
Subject: [PATCH 02/18] chore(release): Update Manifest and Config (#111)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What ❔

This PR updates the version + bootstrap sha of release-please to match our latest releases.

## Why ❔

From now on, releases are going to be done from the public repo. Release-please needs to know where to start.
---
 .github/release-please/config.json   | 2 +-
 .github/release-please/manifest.json | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/release-please/config.json b/.github/release-please/config.json
index 4f732993f37b..8874e29bde21 100644
--- a/.github/release-please/config.json
+++ b/.github/release-please/config.json
@@ -1,7 +1,7 @@
 {
   "separate-pull-requests": true,
   "group-pull-request-title-pattern": "chore: release ${component} ${branch}",
-  "bootstrap-sha": "a980ac61f484a3b6bdbe11e606977d0d92d4e9ff",
+  "bootstrap-sha": "531757b5eb98da80b7e6d0ff7ad9fd0b970cc109",
   "bump-minor-pre-major": true,
   "bump-patch-for-minor-pre-major": true,
   "packages": {
diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json
index bd40d32b4802..d3fd86e709c3 100644
--- a/.github/release-please/manifest.json
+++ b/.github/release-please/manifest.json
@@ -1,6 +1,6 @@
 {
   "sdk/zksync-web3.js": "0.15.4",
   "sdk/zksync-rs": "0.4.0",
-  "core": "15.0.1",
+  "core": "15.0.2",
   "prover": "7.1.1"
 }

From 51e02cb78cd56ecd9f5df31f2b590ba19d866e1b Mon Sep 17 00:00:00 2001
From: Yury Akudovich
Date: Mon, 2 Oct 2023 13:02:37 +0200
Subject: [PATCH 03/18] fix: Bumps webpki to fix RUSTSEC-2023-0052. (#130)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What ❔

Bumps webpki to 0.22.2 due to RUSTSEC-2023-0052.

## Why ❔

To fix the vulnerability.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
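For anyone reproducing this kind of advisory fix locally, a minimal sketch (assuming `cargo-audit` is installed; the `prover` workspace keeps its own lockfile, hence the second invocation):

```sh
# Surface the advisory, then pin the patched webpki release in both lockfiles.
cargo audit                                # flags RUSTSEC-2023-0052 for webpki < 0.22.2
cargo update -p webpki --precise 0.22.2
cargo update -p webpki --precise 0.22.2 --manifest-path prover/Cargo.toml
cargo audit                                # the advisory should no longer be reported
```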
--- Cargo.lock | 4 ++-- prover/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e57439a3b244..e03ed8eaf61a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7176,9 +7176,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" +checksum = "07ecc0cd7cac091bf682ec5efa18b1cff79d617b84181f38b3951dbe135f607f" dependencies = [ "ring", "untrusted", diff --git a/prover/Cargo.lock b/prover/Cargo.lock index c5dde4ef70da..c1703e814745 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -6135,9 +6135,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "07ecc0cd7cac091bf682ec5efa18b1cff79d617b84181f38b3951dbe135f607f" dependencies = [ "ring", "untrusted", From 38d66b1f8929d2381ae43fe388343c3e7e9a630e Mon Sep 17 00:00:00 2001 From: Aleksandr Stepanov Date: Mon, 2 Oct 2023 14:56:38 +0300 Subject: [PATCH 04/18] ci: Migration from ci-runner to matterlabs-ci-runner (#133) --- .github/workflows/build-external-node-docker.yml | 2 +- .github/workflows/build-gar-reusable.yml | 2 +- .github/workflows/build-local-node-docker.yml | 2 +- .github/workflows/build-prover-fri-gpu-gar.yml | 2 +- .github/workflows/coverage.yml | 2 +- .github/workflows/vm-perf-comparison.yml | 2 +- .github/workflows/vm-perf-to-prometheus.yml | 2 +- .github/workflows/zk-environment-cuda-12-0.publish.yml | 2 +- .github/workflows/zk-environment.publish.yml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build-external-node-docker.yml b/.github/workflows/build-external-node-docker.yml index 31ec1e81c093..cccbc84c6c9b 100644 --- a/.github/workflows/build-external-node-docker.yml +++ b/.github/workflows/build-external-node-docker.yml @@ -11,7 +11,7 @@ on: jobs: build-images: name: External Node - Build and Push Docker Image - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: diff --git a/.github/workflows/build-gar-reusable.yml b/.github/workflows/build-gar-reusable.yml index 745ff6666e13..9a14508fdc7a 100644 --- a/.github/workflows/build-gar-reusable.yml +++ b/.github/workflows/build-gar-reusable.yml @@ -20,7 +20,7 @@ on: jobs: build-gar-prover: name: Build GAR prover - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] strategy: fail-fast: false matrix: diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml index 5f4cfa4547eb..9880361206c1 100644 --- a/.github/workflows/build-local-node-docker.yml +++ b/.github/workflows/build-local-node-docker.yml @@ -11,7 +11,7 @@ on: jobs: build-images: name: Local Node - Build and Push Docker Image - runs-on: [self-hosted, ci-runner] + runs-on: [matterlabs-ci-runner] steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: diff --git a/.github/workflows/build-prover-fri-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar.yml index 61304a5eb2c9..9643d9433188 100644 --- a/.github/workflows/build-prover-fri-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar.yml @@ -15,7 +15,7 @@ on: jobs: build-gar-prover-fri-gpu: name: Build prover 
FRI GPU GAR
-    runs-on: [self-hosted, ci-runner]
+    runs-on: [matterlabs-ci-runner]
     steps:
       - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
         with:
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 5064bfe22722..e7ed89eb760a 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -7,7 +7,7 @@ on:
 
 jobs:
   generate:
-    runs-on: [self-hosted, ci-runner]
+    runs-on: [matterlabs-ci-runner]
 
     steps:
       - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml
index 3078c9bfa8a1..1e5b65a35989 100644
--- a/.github/workflows/vm-perf-comparison.yml
+++ b/.github/workflows/vm-perf-comparison.yml
@@ -6,7 +6,7 @@ on:
 jobs:
   vm-benchmarks:
     name: Run VM benchmarks
-    runs-on: [self-hosted, ci-runner]
+    runs-on: [matterlabs-ci-runner]
 
     steps:
       - uses: actions/checkout@v3
diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml
index c5fe27632736..d2a6594ffca2 100644
--- a/.github/workflows/vm-perf-to-prometheus.yml
+++ b/.github/workflows/vm-perf-to-prometheus.yml
@@ -12,7 +12,7 @@ concurrency: vm-benchmarks
 jobs:
   vm-benchmarks:
     name: Run VM benchmarks
-    runs-on: [self-hosted, ci-runner]
+    runs-on: [matterlabs-ci-runner]
 
     steps:
       - uses: actions/checkout@v3
diff --git a/.github/workflows/zk-environment-cuda-12-0.publish.yml b/.github/workflows/zk-environment-cuda-12-0.publish.yml
index f6471582bdf0..7018a61117ff 100644
--- a/.github/workflows/zk-environment-cuda-12-0.publish.yml
+++ b/.github/workflows/zk-environment-cuda-12-0.publish.yml
@@ -16,7 +16,7 @@ jobs:
   push_to_registry:
     if: github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch'
     name: Push Docker image to Docker Hub
-    runs-on: [self-hosted, ci-runner]
+    runs-on: [matterlabs-ci-runner]
     steps:
       - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
         with:
diff --git a/.github/workflows/zk-environment.publish.yml b/.github/workflows/zk-environment.publish.yml
index 561da6190210..6070d37c7071 100644
--- a/.github/workflows/zk-environment.publish.yml
+++ b/.github/workflows/zk-environment.publish.yml
@@ -16,7 +16,7 @@ jobs:
   push_to_registry:
     if: github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch'
     name: Push Docker image to Docker Hub
-    runs-on: [self-hosted, ci-runner]
+    runs-on: [matterlabs-ci-runner]
    steps:
       - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
         with:

From 8df11278ca76000d842fc0f73f5233dfc85ef77e Mon Sep 17 00:00:00 2001
From: Maksym
Date: Mon, 2 Oct 2023 15:48:39 +0300
Subject: [PATCH 05/18] ci: refactor zk env workflow (#135)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What ❔

## Why ❔

Merges the jobs related to zk-environment into a single workflow.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
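The path-based gating that this refactor centralizes can be emulated locally; a rough sketch of the decision the `changed_files` job makes (the grep pattern is illustrative, not the exact filter):

```sh
# Approximate the tj-actions/changed-files check: did HEAD touch zk-environment inputs?
if git diff --name-only HEAD~1..HEAD \
    | grep -qE '^(docker/zk-environment/|\.github/workflows/zk-environment\.publish\.yml)'; then
  echo "zk_environment=true"    # publish jobs guarded by `needs: changed_files` would run
else
  echo "zk_environment=false"   # the publish jobs are skipped
fi
```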
--- .github/workflows/ci.yml | 5 + .../zk-environment-cuda-12-0.publish.yml | 50 -------- .github/workflows/zk-environment.publish.yml | 108 +++++++++++++++++- 3 files changed, 108 insertions(+), 55 deletions(-) delete mode 100644 .github/workflows/zk-environment-cuda-12-0.publish.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 091941a66aa9..2de28c23d154 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,6 +2,11 @@ name: CI on: pull_request: + paths-ignore: + - ".github/workflows/zk-environment.publish.yml" + - "docker/zk-environment/Dockerfile" + - "docker/zk-environment-cuda-12-0/Dockerfile" + - "docker/zk-rust-nightly-environment/Dockerfile" merge_group: push: branches: diff --git a/.github/workflows/zk-environment-cuda-12-0.publish.yml b/.github/workflows/zk-environment-cuda-12-0.publish.yml deleted file mode 100644 index 7018a61117ff..000000000000 --- a/.github/workflows/zk-environment-cuda-12-0.publish.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: publish zk-environment with cuda 12.0 docker image - -on: - pull_request: - branches: - - main - types: [closed] - paths: - - "docker/zk-environment-cuda-12-0/Dockerfile" - - ".github/workflows/zk-environment-cuda-12-0.publish.yml" - workflow_dispatch: - branches: - - "main" - -jobs: - push_to_registry: - if: github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' - name: Push Docker image to Docker Hub - runs-on: [matterlabs-ci-runner] - steps: - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 - with: - submodules: "recursive" - - - name: Login to us-central1 GAR - run: | - gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - - - name: Log in to Docker Hub - uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0 - with: - username: ${{ secrets.DOCKERHUB_USER }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Build and push - uses: docker/build-push-action@v4 - with: - context: . 
- file: docker/zk-environment-cuda-12-0/Dockerfile - push: true - target: nvidia-tools - tags: | - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-12-0:latest - matterlabs/zk-environment:cuda-12-0-latest diff --git a/.github/workflows/zk-environment.publish.yml b/.github/workflows/zk-environment.publish.yml index 6070d37c7071..2fb5c1d88135 100644 --- a/.github/workflows/zk-environment.publish.yml +++ b/.github/workflows/zk-environment.publish.yml @@ -1,4 +1,4 @@ -name: publish zk-environment docker image +name: publish zk-environment docker images on: pull_request: @@ -8,15 +8,47 @@ on: paths: - "docker/zk-environment/Dockerfile" - ".github/workflows/zk-environment.publish.yml" + - "docker/zk-environment-cuda-12-0/Dockerfile" + - ".github/workflows/zk-environment-cuda-12-0.publish.yml" + - "docker/zk-rust-nightly-environment/Dockerfile" + - ".github/workflows/rust-nightly-environment.publish.yml" workflow_dispatch: branches: - "main" jobs: - push_to_registry: - if: github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' - name: Push Docker image to Docker Hub - runs-on: [matterlabs-ci-runner] + changed_files: + name: Changed files + outputs: + zk_environment: ${{ steps.changed-files-yaml.outputs.zk_env_any_changed }} + zk_environment_cuda_12: ${{ steps.changed-files-yaml.outputs.zk_env_cuda_12_any_changed }} + rust_nightly: ${{ steps.changed-files-yaml.outputs.rust_nightly_any_changed }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 + with: + submodules: "recursive" + + - name: Get changed files + id: changed-files-yaml + uses: tj-actions/changed-files@v39 + with: + files_yaml: | + zk_env: + - docker/zk-environment/Dockerfile + - .github/workflows/zk-environment.publish.yml + zk_env_cuda_12: + - docker/zk-environment-cuda-12-0/Dockerfile + - .github/workflows/zk-environment-cuda-12-0.publish.yml + rust_nightly: + - docker/zk-rust-nightly-environment/Dockerfile + - .github/workflows/rust-nightly-environment.publish.yml + + zk_environment: + if: needs.changed_files.outputs.zk_environment == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' + name: Push zk-environment docker image to Docker Hub + runs-on: ubuntu-latest + needs: changed_files steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: @@ -51,3 +83,69 @@ jobs: tags: "matterlabs/zk-environment:latest2.0" file: docker/zk-environment/Dockerfile no-cache: true + + rust_nightly: + if: needs.changed_files.outputs.rust_nightly == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' + name: Push rust nightly docker image to Docker Hub + needs: changed_files + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2 + + - name: Log in to Docker Hub + uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push Docker image + uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4.0.0 + with: + context: . 
+        push: true
+        tags: "matterlabs/zksync_rust:nightly"
+        file: docker/zk-rust-nightly-environment/Dockerfile
+        no-cache: true
+
+
+  zk_environment_cuda_12:
+    if: needs.changed_files.outputs.zk_environment_cuda_12 == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch'
+    name: Push zk-environment cuda 12 docker image to Docker Hub
+    runs-on: [matterlabs-ci-runner]
+    needs: changed_files
+    steps:
+      - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
+        with:
+          submodules: "recursive"
+
+      - name: Login to us-central1 GAR
+        run: |
+          gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev
+
+      - name: Log in to Docker Hub
+        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USER }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Build and push
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          file: docker/zk-environment-cuda-12-0/Dockerfile
+          push: true
+          target: nvidia-tools
+          tags: |
+            us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-12-0:latest
+            matterlabs/zk-environment:cuda-12-0-latest

From c340a17cf347340b816222f72dbf76d113f03adb Mon Sep 17 00:00:00 2001
From: Maksym
Date: Tue, 3 Oct 2023 15:25:20 +0300
Subject: [PATCH 06/18] ci: fix zk-env dockerfile (#143)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What ❔

Refactors the rust nightly docker image.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
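With the nightly toolchain folded into the main `zk-environment` Dockerfile as a build stage (see the diff below), a local build reduces to selecting that target; a sketch, with the output tag chosen here purely for illustration:

```sh
# Build only the rust-nightly stage of the consolidated Dockerfile.
docker build \
  --target rust-nightly \
  -f docker/zk-environment/Dockerfile \
  -t zk-environment:rust-nightly-local \
  .
```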
--- .github/workflows/ci.yml | 8 +++----- .github/workflows/zk-environment.publish.yml | 11 +++-------- docker/zk-environment/Dockerfile | 5 +++++ docker/zk-rust-nightly-environment/Dockerfile | 19 ------------------- 4 files changed, 11 insertions(+), 32 deletions(-) delete mode 100644 docker/zk-rust-nightly-environment/Dockerfile diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2de28c23d154..1afb539d0d9d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,11 +2,6 @@ name: CI on: pull_request: - paths-ignore: - - ".github/workflows/zk-environment.publish.yml" - - "docker/zk-environment/Dockerfile" - - "docker/zk-environment-cuda-12-0/Dockerfile" - - "docker/zk-rust-nightly-environment/Dockerfile" merge_group: push: branches: @@ -46,6 +41,9 @@ jobs: all: - '!core/**' - '!prover/**' + - '!.github/workflows/zk-environment.publish.yml' + - '!docker/zk-environment/Dockerfile' + - '!docker/zk-environment-cuda-12-0/Dockerfile' ci-for-core: name: CI for Core Components needs: changed_files diff --git a/.github/workflows/zk-environment.publish.yml b/.github/workflows/zk-environment.publish.yml index 2fb5c1d88135..b733fab9b176 100644 --- a/.github/workflows/zk-environment.publish.yml +++ b/.github/workflows/zk-environment.publish.yml @@ -10,8 +10,6 @@ on: - ".github/workflows/zk-environment.publish.yml" - "docker/zk-environment-cuda-12-0/Dockerfile" - ".github/workflows/zk-environment-cuda-12-0.publish.yml" - - "docker/zk-rust-nightly-environment/Dockerfile" - - ".github/workflows/rust-nightly-environment.publish.yml" workflow_dispatch: branches: - "main" @@ -22,7 +20,6 @@ jobs: outputs: zk_environment: ${{ steps.changed-files-yaml.outputs.zk_env_any_changed }} zk_environment_cuda_12: ${{ steps.changed-files-yaml.outputs.zk_env_cuda_12_any_changed }} - rust_nightly: ${{ steps.changed-files-yaml.outputs.rust_nightly_any_changed }} runs-on: ubuntu-latest steps: - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 @@ -40,9 +37,6 @@ jobs: zk_env_cuda_12: - docker/zk-environment-cuda-12-0/Dockerfile - .github/workflows/zk-environment-cuda-12-0.publish.yml - rust_nightly: - - docker/zk-rust-nightly-environment/Dockerfile - - .github/workflows/rust-nightly-environment.publish.yml zk_environment: if: needs.changed_files.outputs.zk_environment == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' @@ -85,7 +79,7 @@ jobs: no-cache: true rust_nightly: - if: needs.changed_files.outputs.rust_nightly == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' + if: needs.changed_files.outputs.zk_environment == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch' name: Push rust nightly docker image to Docker Hub needs: changed_files runs-on: ubuntu-latest @@ -108,8 +102,9 @@ jobs: with: context: . 
+          push: true
+          target: rust-nightly
           tags: "matterlabs/zksync_rust:nightly"
-          file: docker/zk-rust-nightly-environment/Dockerfile
+          file: docker/zk-environment/Dockerfile
           no-cache: true
diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile
index 573d42b79ea2..c9871f98afb8 100644
--- a/docker/zk-environment/Dockerfile
+++ b/docker/zk-environment/Dockerfile
@@ -193,3 +193,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     && rm -rf /var/lib/apt/lists/*
 
 ENV LIBRARY_PATH /usr/local/cuda/lib64/stubs
+
+
+FROM base as rust-nightly
+
+RUN rustup default nightly-2023-07-21
diff --git a/docker/zk-rust-nightly-environment/Dockerfile b/docker/zk-rust-nightly-environment/Dockerfile
deleted file mode 100644
index db3c8515c36b..000000000000
--- a/docker/zk-rust-nightly-environment/Dockerfile
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM debian:bookworm-slim
-
-ARG DEBIAN_FRONTEND=noninteractive
-
-RUN apt update && apt install git curl clang openssl libssl-dev gcc g++ pkg-config build-essential libclang-dev -y
-
-ENV RUSTUP_HOME=/usr/local/rustup \
-    CARGO_HOME=/usr/local/cargo \
-    PATH=/usr/local/cargo/bin:$PATH
-
-# Setup rust nightly
-RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
-    rustup install nightly-2023-07-21 && \
-    rustup default nightly-2023-07-21
-
-# Setup cmake
-RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \
-    chmod +x cmake-3.24.2-linux-x86_64.sh && \
-    ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local

From 5e61bdc75b2baa03004d4d3e801170c094766964 Mon Sep 17 00:00:00 2001
From: Danil
Date: Tue, 3 Oct 2023 14:26:33 +0200
Subject: [PATCH 07/18] feat(vm): Restore system-constants-generator (#115)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What ❔

Restore system-constants-generator

## Why ❔

This crate is rarely used, but it is important to keep it alive: it's used sometimes to tune our fee-related constants. Making sure that the code at least compiles is the smallest yet essential part of maintenance.

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
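Since the point of restoring the crate is that it compiles again, the maintenance check this re-enables is cheap; a sketch (package and binary names taken from the `Cargo.toml`/`Cargo.lock` entries in this diff):

```sh
# The crate is back in the workspace, so it is covered by a plain check:
cargo check -p system-constants-generator
# And the fee constants can be regenerated when tuning them:
cargo run -p system-constants-generator
```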
--- Cargo.lock | 15 ++ Cargo.toml | 2 +- .../src/intrinsic_costs.rs | 2 +- .../system-constants-generator/src/main.rs | 21 +- .../system-constants-generator/src/utils.rs | 214 ++++++++++-------- core/lib/vm/src/constants.rs | 4 +- core/lib/vm/src/tracers/traits.rs | 2 +- 7 files changed, 149 insertions(+), 111 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e03ed8eaf61a..0b584dc9a2ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6208,6 +6208,21 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "system-constants-generator" +version = "0.1.0" +dependencies = [ + "codegen 0.2.0", + "once_cell", + "serde", + "serde_json", + "vm", + "zksync_contracts", + "zksync_state", + "zksync_types", + "zksync_utils", +] + [[package]] name = "tagptr" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 9a62cde88d73..9211084819da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ members = [ "core/bin/merkle_tree_consistency_checker", "core/bin/rocksdb_util", "core/bin/storage_logs_dedup_migration", - # "core/bin/system-constants-generator", + "core/bin/system-constants-generator", "core/bin/verification_key_generator_and_server", "core/bin/verified_sources_fetcher", "core/bin/zksync_server", diff --git a/core/bin/system-constants-generator/src/intrinsic_costs.rs b/core/bin/system-constants-generator/src/intrinsic_costs.rs index c663939db6ee..0491be494ab8 100644 --- a/core/bin/system-constants-generator/src/intrinsic_costs.rs +++ b/core/bin/system-constants-generator/src/intrinsic_costs.rs @@ -9,7 +9,7 @@ use crate::utils::{ get_l2_txs, }; use crate::utils::{metrics_from_txs, TransactionGenerator}; -use vm::vm_with_bootloader::BOOTLOADER_TX_ENCODING_SPACE; +use vm::constants::BOOTLOADER_TX_ENCODING_SPACE; use zksync_types::{ethabi::Address, IntrinsicSystemGasConstants, U256}; #[derive(Debug, Clone, Copy, PartialEq)] diff --git a/core/bin/system-constants-generator/src/main.rs b/core/bin/system-constants-generator/src/main.rs index d101647134f8..f076eadd8c8d 100644 --- a/core/bin/system-constants-generator/src/main.rs +++ b/core/bin/system-constants-generator/src/main.rs @@ -1,16 +1,6 @@ use std::fs; use serde::{Deserialize, Serialize}; -use vm::{ - vm_with_bootloader::{BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_L1_GAS, BOOTLOADER_TX_ENCODING_SPACE}, - zk_evm::zkevm_opcode_defs::{ - circuit_prices::{ - ECRECOVER_CIRCUIT_COST_IN_ERGS, KECCAK256_CIRCUIT_COST_IN_ERGS, - SHA256_CIRCUIT_COST_IN_ERGS, - }, - system_params::{MAX_PUBDATA_PER_BLOCK, MAX_TX_ERGS_LIMIT}, - }, -}; use zksync_types::{ IntrinsicSystemGasConstants, GUARANTEED_PUBDATA_IN_TX, L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, MAX_TXS_IN_BLOCK, @@ -21,6 +11,13 @@ mod utils; use codegen::Block; use codegen::Scope; +use vm::constants::{ + BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_L1_GAS, BOOTLOADER_TX_ENCODING_SPACE, MAX_PUBDATA_PER_BLOCK, +}; +use zksync_types::zkevm_test_harness::zk_evm::zkevm_opcode_defs::circuit_prices::{ + ECRECOVER_CIRCUIT_COST_IN_ERGS, KECCAK256_CIRCUIT_COST_IN_ERGS, SHA256_CIRCUIT_COST_IN_ERGS, +}; +use zksync_types::zkevm_test_harness::zk_evm::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; // Params needed for L1 contracts #[derive(Copy, Clone, Debug, Serialize, Deserialize)] @@ -128,7 +125,7 @@ fn generate_rust_fee_constants(intrinsic_gas_constants: &IntrinsicSystemGasConst scope.import("super", "IntrinsicSystemGasConstants"); scope.raw( - vec![ + [ "// 
TODO (SMA-1699): Use this method to ensure that the transactions provide enough", "// intrinsic gas on the API level.", ] @@ -193,7 +190,7 @@ fn generate_rust_fee_constants(intrinsic_gas_constants: &IntrinsicSystemGasConst get_intrinsic_constants_fn.push_block(struct_block); } - vec![ + [ "//! THIS FILE IS AUTOGENERATED: DO NOT EDIT MANUALLY!\n".to_string(), "//! The file with constants related to fees most of which need to be computed\n" .to_string(), diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 3af5df328f42..afb00b5cda7d 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -1,36 +1,62 @@ use once_cell::sync::Lazy; +use std::cell::RefCell; +use std::rc::Rc; +use vm::constants::{BLOCK_GAS_LIMIT, BOOTLOADER_HEAP_PAGE}; use vm::{ - utils::{create_test_block_params, read_bootloader_test_code, BLOCK_GAS_LIMIT}, - vm_with_bootloader::{ - init_vm_inner, push_raw_transaction_to_bootloader_memory, BlockContextMode, - BootloaderJobType, DerivedBlockContext, TxExecutionMode, - }, - zk_evm::{aux_structures::Timestamp, zkevm_opcode_defs::BOOTLOADER_HEAP_PAGE}, - HistoryEnabled, OracleTools, + BootloaderState, BoxedTracer, DynTracer, ExecutionEndTracer, ExecutionProcessing, + HistoryEnabled, HistoryMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, Vm, + VmExecutionMode, VmExecutionStopReason, VmTracer, ZkSyncVmState, }; use zksync_contracts::{ - load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, BaseSystemContracts, - ContractLanguage, SystemContractCode, + load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, read_zbin_bytecode, + BaseSystemContracts, ContractLanguage, SystemContractCode, }; use zksync_state::{InMemoryStorage, StorageView, WriteStorage}; +use zksync_types::block::legacy_miniblock_hash; use zksync_types::{ - ethabi::Token, - fee::Fee, - l1::L1Tx, - l2::L2Tx, - tx::{ - tx_execution_info::{TxExecutionStatus, VmExecutionLogs}, - ExecutionMetrics, - }, - utils::storage_key_for_eth_balance, - AccountTreeId, Address, Execute, L1TxCommonData, L2ChainId, Nonce, StorageKey, Transaction, - BOOTLOADER_ADDRESS, H256, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_GAS_PRICE_POSITION, - SYSTEM_CONTEXT_TX_ORIGIN_POSITION, U256, + ethabi::Token, fee::Fee, l1::L1Tx, l2::L2Tx, utils::storage_key_for_eth_balance, AccountTreeId, + Address, Execute, L1BatchNumber, L1TxCommonData, L2ChainId, MiniblockNumber, Nonce, + ProtocolVersionId, StorageKey, Timestamp, Transaction, BOOTLOADER_ADDRESS, H256, + SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_GAS_PRICE_POSITION, SYSTEM_CONTEXT_TX_ORIGIN_POSITION, + U256, ZKPORTER_IS_AVAILABLE, }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, u256_to_h256}; use crate::intrinsic_costs::VmSpentResourcesResult; +/// Tracer for setting the data for bootloader with custom input +/// and receive an output from this custom bootloader +struct SpecialBootloaderTracer { + input: Vec<(usize, U256)>, + output: Rc>, +} + +impl DynTracer for SpecialBootloaderTracer {} + +impl ExecutionEndTracer for SpecialBootloaderTracer {} + +impl ExecutionProcessing for SpecialBootloaderTracer { + fn initialize_tracer(&mut self, state: &mut ZkSyncVmState) { + state.memory.populate_page( + BOOTLOADER_HEAP_PAGE as usize, + self.input.clone(), + Timestamp(0), + ); + } + fn after_vm_execution( + &mut self, + state: &mut ZkSyncVmState, + _bootloader_state: &BootloaderState, + _stop_reason: VmExecutionStopReason, + ) { + let 
value_recorded_from_test = state.memory.read_slot(BOOTLOADER_HEAP_PAGE as usize, 0); + let mut res = self.output.borrow_mut(); + *res = value_recorded_from_test.value.as_u32(); + } +} + +impl VmTracer for SpecialBootloaderTracer {} + pub static GAS_TEST_SYSTEM_CONTRACTS: Lazy = Lazy::new(|| { let bytecode = read_bootloader_code("gas_test"); let hash = hash_bytecode(&bytecode); @@ -135,19 +161,39 @@ pub(super) fn get_l1_txs(number_of_txs: usize) -> (Vec, Vec Vec { + read_zbin_bytecode(format!( + "etc/system-contracts/bootloader/tests/artifacts/{}.yul/{}.yul.zbin", + test, test + )) +} + +fn default_l1_batch() -> L1BatchEnv { + L1BatchEnv { + previous_batch_hash: None, + number: L1BatchNumber(1), + timestamp: 100, + l1_gas_price: 50_000_000_000, // 50 gwei + fair_l2_gas_price: 250_000_000, // 0.25 gwei + fee_account: Address::random(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp: 100, + prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + } +} + /// Executes the "internal transfer test" of the bootloader -- the test that /// returns the amount of gas needed to perform and internal transfer, assuming no gas price /// per pubdata, i.e. under assumption that the refund will not touch any new slots. pub(super) fn execute_internal_transfer_test() -> u32 { - let (block_context, block_properties) = create_test_block_params(); - let block_context: DerivedBlockContext = block_context.into(); - let raw_storage = InMemoryStorage::with_system_contracts(hash_bytecode); let mut storage_view = StorageView::new(raw_storage); let bootloader_balance_key = storage_key_for_eth_balance(&BOOTLOADER_ADDRESS); storage_view.set_value(bootloader_balance_key, u256_to_h256(U256([0, 0, 1, 0]))); - let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); - let bytecode = read_bootloader_test_code("transfer_test"); let hash = hash_bytecode(&bytecode); let bootloader = SystemContractCode { @@ -155,6 +201,8 @@ pub(super) fn execute_internal_transfer_test() -> u32 { hash, }; + let l1_batch = default_l1_batch(); + let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); let hash = hash_bytecode(&bytecode); let default_aa = SystemContractCode { @@ -162,19 +210,20 @@ pub(super) fn execute_internal_transfer_test() -> u32 { hash, }; - let base_system_contract = BaseSystemContracts { + let base_system_smart_contracts = BaseSystemContracts { bootloader, default_aa, }; - let mut vm = init_vm_inner( - &mut oracle_tools, - BlockContextMode::NewBlock(block_context, Default::default()), - &block_properties, - BLOCK_GAS_LIMIT, - &base_system_contract, - TxExecutionMode::VerifyExecute, - ); + let system_env = SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: ProtocolVersionId::latest(), + base_system_smart_contracts, + gas_limit: BLOCK_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, + chain_id: L2ChainId::default(), + }; let eth_token_sys_contract = load_sys_contract("L2EthToken"); let transfer_from_to = ð_token_sys_contract @@ -197,24 +246,22 @@ pub(super) fn execute_internal_transfer_test() -> u32 { input }; let input: Vec<_> = bytes_to_be_words(input).into_iter().enumerate().collect(); - vm.state - .memory - .populate_page(BOOTLOADER_HEAP_PAGE as usize, input, Timestamp(0)); - - let result = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); - assert!( - 
result.block_tip_result.revert_reason.is_none(), - "The internal call has reverted" - ); - assert!( - result.full_result.revert_reason.is_none(), - "The internal call has reverted" + let tracer_result = Rc::new(RefCell::new(0)); + let tracer = SpecialBootloaderTracer { + input, + output: tracer_result.clone(), + }; + let mut vm = Vm::new( + l1_batch, + system_env, + Rc::new(RefCell::new(storage_view)), + HistoryEnabled, ); + let result = vm.inspect(vec![tracer.into_boxed()], VmExecutionMode::Bootloader); - let value_recorded_from_test = vm.state.memory.read_slot(BOOTLOADER_HEAP_PAGE as usize, 0); - - value_recorded_from_test.value.as_u32() + assert!(!result.result.is_failed(), "The internal call has reverted"); + tracer_result.take() } // Executes an array of transactions in the VM. @@ -226,9 +273,6 @@ pub(super) fn execute_user_txs_in_test_gas_vm( .iter() .fold(U256::zero(), |sum, elem| sum + elem.gas_limit()); - let (block_context, block_properties) = create_test_block_params(); - let block_context: DerivedBlockContext = block_context.into(); - let raw_storage = InMemoryStorage::with_system_contracts(hash_bytecode); let mut storage_view = StorageView::new(raw_storage); @@ -256,61 +300,43 @@ pub(super) fn execute_user_txs_in_test_gas_vm( storage_view.set_value(tx_gas_price_key, u256_to_h256(U256([1, 0, 0, 0]))); } - let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); + let l1_batch = default_l1_batch(); + let system_env = SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: GAS_TEST_SYSTEM_CONTRACTS.clone(), + gas_limit: BLOCK_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, + chain_id: L2ChainId::default(), + }; - let mut vm = init_vm_inner( - &mut oracle_tools, - BlockContextMode::NewBlock(block_context, Default::default()), - &block_properties, - BLOCK_GAS_LIMIT, - &GAS_TEST_SYSTEM_CONTRACTS, - TxExecutionMode::VerifyExecute, + let mut vm = Vm::new( + l1_batch, + system_env, + Rc::new(RefCell::new(storage_view)), + HistoryEnabled, ); - vm.start_next_l2_block(vm.get_current_l2_block_info().dummy_next_block_info()); let mut total_gas_refunded = 0; for tx in txs { - push_raw_transaction_to_bootloader_memory( - &mut vm, - tx.clone().into(), - TxExecutionMode::VerifyExecute, - 0, - None, - ); - let tx_execution_result = vm - .execute_next_tx(u32::MAX, false) - .expect("Bootloader failed while processing transaction"); + vm.push_transaction(tx); + let tx_execution_result = vm.execute(VmExecutionMode::OneTx); - total_gas_refunded += tx_execution_result.gas_refunded; + total_gas_refunded += tx_execution_result.refunds.gas_refunded; if !accept_failure { - assert_eq!( - tx_execution_result.status, - TxExecutionStatus::Success, + assert!( + !tx_execution_result.result.is_failed(), "A transaction has failed" ); } } - let result = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); - let execution_logs = VmExecutionLogs { - storage_logs: result.full_result.storage_log_queries, - events: result.full_result.events, - l2_to_l1_logs: result.full_result.l2_to_l1_logs, - total_log_queries_count: result.full_result.total_log_queries, - }; - - let metrics = ExecutionMetrics::new( - &execution_logs, - result.full_result.gas_used as usize, - 0, // The number of contracts deployed is irrelevant for our needs - result.full_result.contracts_used, - result.full_result.cycles_used, - result.full_result.computational_gas_used, 
- result.full_result.total_log_queries, - ); + let result = vm.execute(VmExecutionMode::Bootloader); + let metrics = result.get_execution_metrics(None); VmSpentResourcesResult { - gas_consumed: vm.gas_consumed(), + gas_consumed: result.statistics.gas_used, total_gas_paid: total_gas_paid_upfront.as_u32() - total_gas_refunded, pubdata_published: metrics.size() as u32, total_pubdata_paid: 0, diff --git a/core/lib/vm/src/constants.rs b/core/lib/vm/src/constants.rs index a51688b851e7..1c1cb3d5017f 100644 --- a/core/lib/vm/src/constants.rs +++ b/core/lib/vm/src/constants.rs @@ -70,8 +70,8 @@ pub(crate) const TX_GAS_LIMIT_OFFSET: usize = 4; const INITIAL_BASE_PAGE: u32 = 8; pub const BOOTLOADER_HEAP_PAGE: u32 = heap_page_from_base(MemoryPage(INITIAL_BASE_PAGE)).0; -pub(crate) const BLOCK_OVERHEAD_GAS: u32 = 1200000; -pub(crate) const BLOCK_OVERHEAD_L1_GAS: u32 = 1000000; +pub const BLOCK_OVERHEAD_GAS: u32 = 1200000; +pub const BLOCK_OVERHEAD_L1_GAS: u32 = 1000000; pub const BLOCK_OVERHEAD_PUBDATA: u32 = BLOCK_OVERHEAD_L1_GAS / L1_GAS_PER_PUBDATA_BYTE; /// VM Hooks are used for communication between bootloader and tracers. diff --git a/core/lib/vm/src/tracers/traits.rs b/core/lib/vm/src/tracers/traits.rs index 6e76a041fabc..33e149066b16 100644 --- a/core/lib/vm/src/tracers/traits.rs +++ b/core/lib/vm/src/tracers/traits.rs @@ -69,7 +69,7 @@ pub trait DynTracer { /// Save the results of the vm execution. pub trait VmTracer: - DynTracer + ExecutionEndTracer + ExecutionProcessing + Send + DynTracer + ExecutionEndTracer + ExecutionProcessing { fn save_results(&mut self, _result: &mut VmExecutionResultAndLogs) {} } From cf44a491a324199b4cf457d28658da44b6dafc61 Mon Sep 17 00:00:00 2001 From: Danil Date: Tue, 3 Oct 2023 14:45:52 +0200 Subject: [PATCH 08/18] feat(vm): Introduce new way of returning from the tracer #2569 (#116) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … private at a4d5ca6625210471d9de66d61e7c9a41a336afb8 # What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --------- Signed-off-by: Danil --- core/lib/vm/src/errors/halt.rs | 9 ++++ core/lib/vm/src/implementation/execution.rs | 10 +++-- core/lib/vm/src/tracers/default_tracers.rs | 42 +++++++++++++------ core/lib/vm/src/tracers/result_tracer.rs | 13 +++++- .../lib/vm/src/tracers/storage_invocations.rs | 15 +++++-- core/lib/vm/src/tracers/traits.rs | 30 +++++++++---- core/lib/vm/src/tracers/utils.rs | 5 ++- core/lib/vm/src/tracers/validation/mod.rs | 19 +++++++-- .../src/api_server/execution_sandbox/error.rs | 4 ++ .../api_server/execution_sandbox/validate.rs | 2 +- 10 files changed, 112 insertions(+), 37 deletions(-) diff --git a/core/lib/vm/src/errors/halt.rs b/core/lib/vm/src/errors/halt.rs index 10c8a8d702b9..0a5057a0616f 100644 --- a/core/lib/vm/src/errors/halt.rs +++ b/core/lib/vm/src/errors/halt.rs @@ -26,6 +26,8 @@ pub enum Halt { UnexpectedVMBehavior(String), // Bootloader is out of gas. BootloaderOutOfGas, + // Validation step is out of gas + ValidationOutOfGas, // Transaction has a too big gas limit and will not be executed by the server. 
TooBigGasLimit, // The bootloader did not have enough gas to start the transaction in the first place @@ -37,6 +39,7 @@ pub enum Halt { // Failed to publish information about the batch and the L2 block onto L1 FailedToAppendTransactionToL2Block(String), VMPanic, + TracerCustom(String), } impl Display for Halt { @@ -102,6 +105,12 @@ impl Display for Halt { reason ) } + Halt::TracerCustom(reason) => { + write!(f, "Tracer aborted execution: {}", reason) + } + Halt::ValidationOutOfGas => { + write!(f, "Validation run out of gas") + } } } } diff --git a/core/lib/vm/src/implementation/execution.rs b/core/lib/vm/src/implementation/execution.rs index 9944a37f7e83..52c4ff0cb0da 100644 --- a/core/lib/vm/src/implementation/execution.rs +++ b/core/lib/vm/src/implementation/execution.rs @@ -6,7 +6,9 @@ use crate::old_vm::{ utils::{vm_may_have_ended_inner, VmExecutionResult}, }; use crate::tracers::{ - traits::{BoxedTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}, + traits::{ + BoxedTracer, ExecutionEndTracer, ExecutionProcessing, TracerExecutionStatus, VmTracer, + }, DefaultExecutionTracer, RefundsTracer, }; use crate::types::{inputs::VmExecutionMode, outputs::VmExecutionResultAndLogs}; @@ -104,11 +106,11 @@ impl Vm { break VmExecutionStopReason::VmFinished; } - if tracer.should_stop_execution() { - break VmExecutionStopReason::TracerRequestedStop; + if let TracerExecutionStatus::Stop(reason) = tracer.should_stop_execution() { + break VmExecutionStopReason::TracerRequestedStop(reason); } }; - tracer.after_vm_execution(&mut self.state, &self.bootloader_state, result); + tracer.after_vm_execution(&mut self.state, &self.bootloader_state, result.clone()); result } diff --git a/core/lib/vm/src/tracers/default_tracers.rs b/core/lib/vm/src/tracers/default_tracers.rs index 7cc1e19869cf..4df00193265d 100644 --- a/core/lib/vm/src/tracers/default_tracers.rs +++ b/core/lib/vm/src/tracers/default_tracers.rs @@ -16,14 +16,17 @@ use crate::bootloader_state::BootloaderState; use crate::constants::BOOTLOADER_HEAP_PAGE; use crate::old_vm::history_recorder::HistoryMode; use crate::old_vm::memory::SimpleMemory; -use crate::tracers::traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}; +use crate::tracers::traits::{ + DynTracer, ExecutionEndTracer, ExecutionProcessing, TracerExecutionStatus, + TracerExecutionStopReason, VmTracer, +}; use crate::tracers::utils::{ computational_gas_price, gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, VmHook, }; use crate::tracers::ResultTracer; use crate::types::internals::ZkSyncVmState; -use crate::{VmExecutionMode, VmExecutionStopReason}; +use crate::{Halt, VmExecutionMode, VmExecutionStopReason}; /// Default tracer for the VM. It manages the other tracers execution and stop the vm when needed. 
pub(crate) struct DefaultExecutionTracer { @@ -141,17 +144,32 @@ impl Tracer for DefaultExecutionTracer { } impl ExecutionEndTracer for DefaultExecutionTracer { - fn should_stop_execution(&self) -> bool { - let mut should_stop = match self.execution_mode { - VmExecutionMode::OneTx => self.tx_has_been_processed(), - VmExecutionMode::Batch => false, - VmExecutionMode::Bootloader => self.ret_from_the_bootloader == Some(RetOpcode::Ok), + fn should_stop_execution(&self) -> TracerExecutionStatus { + match self.execution_mode { + VmExecutionMode::OneTx => { + if self.tx_has_been_processed() { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish); + } + } + VmExecutionMode::Bootloader => { + if self.ret_from_the_bootloader == Some(RetOpcode::Ok) { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish); + } + } + VmExecutionMode::Batch => {} }; - should_stop = should_stop || self.validation_run_out_of_gas(); + if self.validation_run_out_of_gas() { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Abort( + Halt::ValidationOutOfGas, + )); + } for tracer in self.custom_tracers.iter() { - should_stop = should_stop || tracer.should_stop_execution(); + let reason = tracer.should_stop_execution(); + if TracerExecutionStatus::Continue != reason { + return reason; + } } - should_stop + TracerExecutionStatus::Continue } } @@ -244,9 +262,9 @@ impl ExecutionProcessing for DefaultExecu stop_reason: VmExecutionStopReason, ) { self.result_tracer - .after_vm_execution(state, bootloader_state, stop_reason); + .after_vm_execution(state, bootloader_state, stop_reason.clone()); for processor in self.custom_tracers.iter_mut() { - processor.after_vm_execution(state, bootloader_state, stop_reason); + processor.after_vm_execution(state, bootloader_state, stop_reason.clone()); } } } diff --git a/core/lib/vm/src/tracers/result_tracer.rs b/core/lib/vm/src/tracers/result_tracer.rs index b8e089493565..dd61ea49cea6 100644 --- a/core/lib/vm/src/tracers/result_tracer.rs +++ b/core/lib/vm/src/tracers/result_tracer.rs @@ -24,6 +24,7 @@ use crate::types::{ }; use crate::constants::{BOOTLOADER_HEAP_PAGE, RESULT_SUCCESS_FIRST_SLOT}; +use crate::tracers::traits::TracerExecutionStopReason; use crate::{Halt, TxRevertReason}; use crate::{VmExecutionMode, VmExecutionStopReason}; @@ -120,9 +121,11 @@ impl ExecutionProcessing for ResultTracer // One of the tracers above has requested to stop the execution. 
// If it was the correct stop we already have the result, // otherwise it can be out of gas error - VmExecutionStopReason::TracerRequestedStop => { + VmExecutionStopReason::TracerRequestedStop(reason) => { match self.execution_mode { - VmExecutionMode::OneTx => self.vm_stopped_execution(state, bootloader_state), + VmExecutionMode::OneTx => { + self.vm_stopped_execution(state, bootloader_state, reason) + } VmExecutionMode::Batch => self.vm_finished_execution(state), VmExecutionMode::Bootloader => self.vm_finished_execution(state), }; @@ -188,7 +191,13 @@ impl ResultTracer { &mut self, state: &ZkSyncVmState, bootloader_state: &BootloaderState, + reason: TracerExecutionStopReason, ) { + if let TracerExecutionStopReason::Abort(halt) = reason { + self.result = Some(Result::Halt { reason: halt }); + return; + } + if self.bootloader_out_of_gas { self.result = Some(Result::Halt { reason: Halt::BootloaderOutOfGas, diff --git a/core/lib/vm/src/tracers/storage_invocations.rs b/core/lib/vm/src/tracers/storage_invocations.rs index ef4b59c60a88..bd6f419eddfb 100644 --- a/core/lib/vm/src/tracers/storage_invocations.rs +++ b/core/lib/vm/src/tracers/storage_invocations.rs @@ -1,7 +1,11 @@ use crate::bootloader_state::BootloaderState; use crate::old_vm::history_recorder::HistoryMode; -use crate::tracers::traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}; +use crate::tracers::traits::{ + DynTracer, ExecutionEndTracer, ExecutionProcessing, TracerExecutionStatus, + TracerExecutionStopReason, VmTracer, +}; use crate::types::internals::ZkSyncVmState; +use crate::Halt; use zksync_state::WriteStorage; #[derive(Debug, Default, Clone)] @@ -21,8 +25,13 @@ impl StorageInvocations { impl DynTracer for StorageInvocations {} impl ExecutionEndTracer for StorageInvocations { - fn should_stop_execution(&self) -> bool { - self.current >= self.limit + fn should_stop_execution(&self) -> TracerExecutionStatus { + if self.current >= self.limit { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Abort( + Halt::TracerCustom("Storage invocations limit reached".to_string()), + )); + } + TracerExecutionStatus::Continue } } diff --git a/core/lib/vm/src/tracers/traits.rs b/core/lib/vm/src/tracers/traits.rs index 33e149066b16..4e76ed1fa15d 100644 --- a/core/lib/vm/src/tracers/traits.rs +++ b/core/lib/vm/src/tracers/traits.rs @@ -8,7 +8,7 @@ use crate::old_vm::history_recorder::HistoryMode; use crate::old_vm::memory::SimpleMemory; use crate::types::internals::ZkSyncVmState; use crate::types::outputs::VmExecutionResultAndLogs; -use crate::VmExecutionStopReason; +use crate::{Halt, VmExecutionStopReason}; /// Run tracer for collecting data during the vm execution cycles pub trait ExecutionProcessing: @@ -31,14 +31,6 @@ pub trait ExecutionProcessing: } } -/// Stop the vm execution if the tracer conditions are met -pub trait ExecutionEndTracer { - // Returns whether the vm execution should stop. - fn should_stop_execution(&self) -> bool { - false - } -} - /// Version of zk_evm::Tracer suitable for dynamic dispatch. pub trait DynTracer { fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &SimpleMemory) {} @@ -83,3 +75,23 @@ impl + 'static> BoxedTracer { + // Returns whether the vm execution should stop. 
+ fn should_stop_execution(&self) -> TracerExecutionStatus { + TracerExecutionStatus::Continue + } +} diff --git a/core/lib/vm/src/tracers/utils.rs b/core/lib/vm/src/tracers/utils.rs index f86b496b0787..5f9090d6180c 100644 --- a/core/lib/vm/src/tracers/utils.rs +++ b/core/lib/vm/src/tracers/utils.rs @@ -18,6 +18,7 @@ use crate::constants::{ use crate::old_vm::history_recorder::HistoryMode; use crate::old_vm::memory::SimpleMemory; use crate::old_vm::utils::{aux_heap_page_from_base, heap_page_from_base}; +use crate::tracers::traits::TracerExecutionStopReason; #[derive(Clone, Debug, Copy)] pub(crate) enum VmHook { @@ -217,8 +218,8 @@ pub(crate) fn get_vm_hook_params(memory: &SimpleMemory) -> Ve ) } -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub enum VmExecutionStopReason { VmFinished, - TracerRequestedStop, + TracerRequestedStop(TracerExecutionStopReason), } diff --git a/core/lib/vm/src/tracers/validation/mod.rs b/core/lib/vm/src/tracers/validation/mod.rs index 4b94e3f177b5..d85d031665ac 100644 --- a/core/lib/vm/src/tracers/validation/mod.rs +++ b/core/lib/vm/src/tracers/validation/mod.rs @@ -27,7 +27,10 @@ use zksync_utils::{ use crate::old_vm::history_recorder::HistoryMode; use crate::old_vm::memory::SimpleMemory; -use crate::tracers::traits::{DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}; +use crate::tracers::traits::{ + DynTracer, ExecutionEndTracer, ExecutionProcessing, TracerExecutionStatus, + TracerExecutionStopReason, VmTracer, +}; use crate::tracers::utils::{ computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, }; @@ -38,7 +41,7 @@ pub use params::ValidationTracerParams; use types::NewTrustedValidationItems; use types::ValidationTracerMode; -use crate::VmExecutionResultAndLogs; +use crate::{Halt, VmExecutionResultAndLogs}; /// Tracer that is used to ensure that the validation adheres to all the rules /// to prevent DDoS attacks on the server. 
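With this change, a custom tracer no longer signals a bare boolean: it either lets the VM continue or stops it with an explicit `Finish`/`Abort` reason. A minimal sketch of a hypothetical custom tracer under the new scheme (the `CycleLimiter` type and its cycle-budget logic are invented for illustration, and the trait generic parameters, which the hunks above elide, are assumed to follow the crate's `HistoryMode` convention):

```rust
use crate::old_vm::history_recorder::HistoryMode;
use crate::tracers::traits::{
    DynTracer, ExecutionEndTracer, TracerExecutionStatus, TracerExecutionStopReason,
};
use crate::Halt;

/// Hypothetical tracer that aborts execution once a fixed cycle budget is spent.
#[derive(Debug, Default, Clone)]
pub struct CycleLimiter {
    cycles_seen: usize,
    limit: usize,
}

// Empty impl: all `DynTracer` hooks have default bodies, as with `StorageInvocations`.
impl<H: HistoryMode> DynTracer<H> for CycleLimiter {}

impl<H: HistoryMode> ExecutionEndTracer<H> for CycleLimiter {
    fn should_stop_execution(&self) -> TracerExecutionStatus {
        if self.cycles_seen >= self.limit {
            // `Abort` carries a `Halt` back to the caller; `Stop(Finish)` would
            // end execution while still reporting it as successful.
            return TracerExecutionStatus::Stop(TracerExecutionStopReason::Abort(
                Halt::TracerCustom("Cycle budget exhausted".to_string()),
            ));
        }
        TracerExecutionStatus::Continue
    }
}
```

This mirrors `StorageInvocations`: since the trait provides a default `should_stop_execution` returning `Continue`, a tracer only overrides it when it actually wants to stop the VM.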
@@ -341,8 +344,16 @@ impl DynTracer for ValidationTracer { } impl ExecutionEndTracer for ValidationTracer { - fn should_stop_execution(&self) -> bool { - self.should_stop_execution || self.result.get().is_some() + fn should_stop_execution(&self) -> TracerExecutionStatus { + if self.should_stop_execution { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish); + } + if let Some(result) = self.result.get() { + return TracerExecutionStatus::Stop(TracerExecutionStopReason::Abort( + Halt::TracerCustom(format!("Validation error: {:#?}", result)), + )); + } + TracerExecutionStatus::Continue } } diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs index abc50af37a5f..b4f04e2e5d60 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs @@ -61,6 +61,10 @@ impl From for SandboxExecutionError { Halt::FailedToAppendTransactionToL2Block(reason) => { SandboxExecutionError::Revert(reason, vec![]) } + Halt::TracerCustom(reason) => SandboxExecutionError::Revert(reason, vec![]), + Halt::ValidationOutOfGas => Self::AccountValidationFailed( + "The validation of the transaction ran out of gas".to_string(), + ), } } } diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs index 05eb7f3ce2d9..2dd5ae7b9c25 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs @@ -86,8 +86,8 @@ impl TxSharedArgs { ]); let result = match (result.result, validation_result.get()) { - (ExecutionResult::Halt { reason }, _) => Err(ValidationError::FailedTx(reason)), (_, Some(err)) => Err(ValidationError::ViolatedRule(err.clone())), + (ExecutionResult::Halt { reason }, _) => Err(ValidationError::FailedTx(reason)), (_, None) => Ok(()), }; From f6284bdb627c1c3b42d587e173476a4598f5609b Mon Sep 17 00:00:00 2001 From: Maksym Date: Tue, 3 Oct 2023 15:59:03 +0300 Subject: [PATCH 09/18] ci: fix zk environment workflow condition (#147) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Fix for zk env workflow ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- .github/workflows/zk-environment.publish.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/zk-environment.publish.yml b/.github/workflows/zk-environment.publish.yml
index b733fab9b176..bf2ec7fa18bf 100644
--- a/.github/workflows/zk-environment.publish.yml
+++ b/.github/workflows/zk-environment.publish.yml
@@ -109,7 +109,7 @@ jobs:
   zk_environment_cuda_12:
-    if: needs.changed_files.outputs.zk_environment_cuda_12 == 'true' github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch'
+    if: needs.changed_files.outputs.zk_environment_cuda_12 == 'true' && github.event.pull_request.merged == true || github.event_name == 'workflow_dispatch'
     name: Push zk-environment cuda 12 docker image to Docker Hub
     runs-on: [matterlabs-ci-runner]
     needs: changed_files

From 6a2367698ececdbae85dbb4ae173b8a2a537d9bd Mon Sep 17 00:00:00 2001
From: Yury Akudovich
Date: Tue, 3 Oct 2023 15:56:57 +0200
Subject: [PATCH 10/18] ci: Makes TruffleHog run in merge queue. (#149)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What ❔

Makes TruffleHog run in merge queue.

## Why ❔

To prevent any secrets from being merged.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
---
 .github/workflows/secrets_scanner.yaml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/secrets_scanner.yaml b/.github/workflows/secrets_scanner.yaml
index 54054cf7cc12..6a1faa200cc4 100644
--- a/.github/workflows/secrets_scanner.yaml
+++ b/.github/workflows/secrets_scanner.yaml
@@ -1,5 +1,7 @@
 name: Leaked Secrets Scan
-on: [pull_request]
+on:
+  pull_request:
+  merge_group:
 jobs:
   TruffleHog:
     runs-on: ubuntu-latest

From 0dec553804858a435151bae98930d2f70c1ae596 Mon Sep 17 00:00:00 2001
From: Shahar Kaminsky
Date: Tue, 3 Oct 2023 17:31:52 +0300
Subject: [PATCH 11/18] fix(prover): Add Prover Readme (#146)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What ❔

This PR adds all the prover READMEs from the private repo.

## Why ❔

The mirroring script ignored all README files.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
--- prover/proof_fri_compressor/README.md | 7 +
 prover/prover/README.md | 8 +
 prover/prover_fri/README.md | 150 ++++++++++++++++++
 prover/prover_fri_gateway/README.md | 11 ++
 prover/prover_fri_types/README.md | 8 +
 .../README.md | 22 +++
 prover/witness_generator/README.md | 52 ++++++
 prover/witness_vector_generator/README.md | 7 +
 8 files changed, 265 insertions(+)
 create mode 100644 prover/proof_fri_compressor/README.md
 create mode 100644 prover/prover/README.md
 create mode 100644 prover/prover_fri/README.md
 create mode 100644 prover/prover_fri_gateway/README.md
 create mode 100644 prover/prover_fri_types/README.md
 create mode 100644 prover/vk_setup_data_generator_server_fri/README.md
 create mode 100644 prover/witness_generator/README.md
 create mode 100644 prover/witness_vector_generator/README.md

diff --git a/prover/proof_fri_compressor/README.md b/prover/proof_fri_compressor/README.md
new file mode 100644
index 000000000000..4b0fa52ed9fd
--- /dev/null
+++ b/prover/proof_fri_compressor/README.md
@@ -0,0 +1,7 @@
+# Proof FRI compressor
+
+Used to compress the FRI proof to a Bellman proof that gets sent to L1.
+
+## Running
+
+`zk f cargo +nightly-2023-07-21 run --release --bin zksync_proof_fri_compressor`

diff --git a/prover/prover/README.md b/prover/prover/README.md
new file mode 100644
index 000000000000..2d70cf056d4b
--- /dev/null
+++ b/prover/prover/README.md
@@ -0,0 +1,8 @@
+# Readme
+
+For compiling locally (no CUDA), set `features=["legacy"], default-features=false` for:
+
+- `./Cargo.toml`: `heavy-ops-service` dependency.
+- `../setup_key_generator_and_server/Cargo.toml`: `api` and `prover-service` dependencies.
+
+**Don't push those changes!**

diff --git a/prover/prover_fri/README.md b/prover/prover_fri/README.md
new file mode 100644
index 000000000000..0b4d17def736
--- /dev/null
+++ b/prover/prover_fri/README.md
@@ -0,0 +1,150 @@
+# FRI Prover
+
+## Running the CPU prover
+
+`zk f cargo +nightly-2023-07-21 run --release --bin zksync_prover_fri`
+
+## Running the GPU prover (requires CUDA 12.0+)
+
+`zk f cargo +nightly-2023-07-21 run --release --features "gpu" --bin zksync_prover_fri`
+
+## Proving a block using the CPU prover locally
+
+The steps below can be used to prove a block on a local machine using the CPU prover. This is useful for debugging and
+testing. Machine specs:
+
+- CPU: At least 8 physical cores
+- RAM: 60GB of RAM (if your machine has less RAM, enable swap)
+- Disk: 400GB of free disk
+
+1. Install the correct nightly version using the command: `rustup install nightly-2023-07-21`
+2. Generate the CPU setup data (no need to regenerate if it's already there). This will consume around 300GB of disk.
+   Use these commands:
+
+   ```bash
+   for i in {1..13}; do zk f cargo run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i --is_base_layer; done
+
+   for i in {1..15}; do zk f cargo run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i; done
+   ```
+
+3. Initialize DB and run migrations: `zk init`
+
+4. Override the following configuration in your `dev.env`:
+
+   ```
+   ETH_SENDER_SENDER_PROOF_SENDING_MODE=OnlyRealProofs
+   ETH_SENDER_SENDER_PROOF_LOADING_MODE=FriProofFromGcs
+   OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/server/artifacts
+   PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/prover/artifacts
+   FRI_PROVER_SETUP_DATA_PATH=/path/to/above-generated/cpu-setup-data
+   ```
+
+5. Run the server `zk server --components=api,eth,tree,state_keeper,housekeeper,proof_data_handler` to produce blocks to
+   be proven
+6.
Run the prover gateway to fetch blocks to be proven from the server:
+   `zk f cargo run --release --bin zksync_prover_fri_gateway`
+7. Run 4 witness generators to generate witnesses for each round:
+
+   ```
+   API_PROMETHEUS_LISTENER_PORT=3116 zk f cargo run --release --bin zksync_witness_generator -- --round=basic_circuits
+   API_PROMETHEUS_LISTENER_PORT=3117 zk f cargo run --release --bin zksync_witness_generator -- --round=leaf_aggregation
+   API_PROMETHEUS_LISTENER_PORT=3118 zk f cargo run --release --bin zksync_witness_generator -- --round=node_aggregation
+   API_PROMETHEUS_LISTENER_PORT=3119 zk f cargo run --release --bin zksync_witness_generator -- --round=scheduler
+   ```
+
+8. Run the prover to perform the actual proving: `zk f cargo run --release --bin zksync_prover_fri`
+9. Finally, run the proof compressor to compress the proof to be sent to L1:
+   `zk f cargo run --release --bin zksync_proof_fri_compressor`
+
+## Proving a block using the GPU prover locally
+
+The steps below can be used to prove a block on a local machine using the GPU prover. It requires CUDA 12.0 to be
+installed as a prerequisite. This is useful for debugging and testing. Machine specs:
+
+- CPU: At least 8 physical cores
+- RAM: 16GB of RAM (if your machine has less RAM, enable swap)
+- Disk: 30GB of free disk
+- GPU: 1x Nvidia L4/T4 with 16GB of GPU RAM
+
+1. Install the correct nightly version using the command: `rustup install nightly-2023-07-21`
+2. Generate the GPU setup data (no need to regenerate if it's already there). This will consume around 300GB of disk.
+   Use these commands:
+
+   ```bash
+   for i in {1..13}; do zk f cargo run --features "gpu" --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i --is_base_layer; done
+
+   for i in {1..15}; do zk f cargo run --features "gpu" --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i; done
+   ```
+
+3. Initialize DB and run migrations: `zk init`
+
+4. Override the following configuration in your `dev.env`:
+
+   ```
+   ETH_SENDER_SENDER_PROOF_SENDING_MODE=OnlyRealProofs
+   ETH_SENDER_SENDER_PROOF_LOADING_MODE=FriProofFromGcs
+   OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/server/artifacts
+   PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/prover/artifacts
+   FRI_PROVER_SETUP_DATA_PATH=/path/to/above-generated/gpu-setup-data
+   ```
+
+5. Run the server `zk server --components=api,eth,tree,state_keeper,housekeeper,proof_data_handler` to produce blocks to
+   be proven
+6. Run the prover gateway to fetch blocks to be proven from the server:
+   `zk f cargo run --release --bin zksync_prover_fri_gateway`
+7. Run 4 witness generators to generate witnesses for each round:
+
+   ```
+   API_PROMETHEUS_LISTENER_PORT=3116 zk f cargo run --release --bin zksync_witness_generator -- --round=basic_circuits
+   API_PROMETHEUS_LISTENER_PORT=3117 zk f cargo run --release --bin zksync_witness_generator -- --round=leaf_aggregation
+   API_PROMETHEUS_LISTENER_PORT=3118 zk f cargo run --release --bin zksync_witness_generator -- --round=node_aggregation
+   API_PROMETHEUS_LISTENER_PORT=3119 zk f cargo run --release --bin zksync_witness_generator -- --round=scheduler
+   ```
+
+8. Run the prover to perform the actual proving: `zk f cargo run --features "gpu" --release --bin zksync_prover_fri`
+9.
Run 5 witness vector generators to feed jobs to the GPU prover:
+
+   ```
+   FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3416 zk f cargo run --release --bin zksync_witness_vector_generator
+   FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3417 zk f cargo run --release --bin zksync_witness_vector_generator
+   FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3418 zk f cargo run --release --bin zksync_witness_vector_generator
+   FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3419 zk f cargo run --release --bin zksync_witness_vector_generator
+   FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3420 zk f cargo run --release --bin zksync_witness_vector_generator
+   ```
+
+10. Finally, run the proof compressor to compress the proof to be sent to L1:
+    `zk f cargo run --release --bin zksync_proof_fri_compressor`
+
+## Performing a circuit upgrade
+
+A circuit upgrade requires the crypto libraries to be updated and, if the circuits change, new setup data, verification
+keys, and finalization hints to be generated. The steps below can be used to perform a circuit upgrade:
+
+1. Check whether the circuit geometry has changed in the new version of the circuits by running the
+   [workflow](https://github.com/matter-labs/zkevm_test_harness/actions/workflows/geometry-config-generator.yml) in
+   harness, and merge the generated PR.
+2. Update the relevant crypto dependencies (boojum, zkevm_circuit, harness, etc.) in `Cargo.lock`, for example:
+   `cargo update -p zkevm_test_harness@1.4.0`
+3. Prepare a PR with the updated dependencies ([sample PR](https://github.com/matter-labs/zksync-2-dev/pull/2481)).
+4. Run the verification key
+   [workflow](https://github.com/matter-labs/zksync-2-dev/actions/workflows/fri-vk-generator.yaml) against the PR to
+   generate the verification key and finalization hints for the new circuit.
+5. Only once the above verification key workflow is successful, start the setup-data generation (CPU and GPU setup data
+   generation can be done in parallel). This ordering is important: the setup data requires the new VK, so we need to
+   wait for the VK generation to finish.
+6. Run the CPU setup data generation
+   [workflow](https://github.com/matter-labs/zksync-2-dev/actions/workflows/fri-setup-data-generator.yml) against the PR
+   to generate the CPU setup data.
+7. Run the GPU setup data generation
+   [workflow](https://github.com/matter-labs/zksync-2-dev/actions/workflows/fri-gpu-setup-data-generator.yml) against
+   the PR to generate the GPU setup data.
+8. Once the setup data generation workflows are successful, update the PR with the `setup_keys_id` in
+   [build-docker-from-tag.yml](../../.github/workflows/build-docker-from-tag.yml) and in
+   [fri-gpu-prover-integration-test.yml](../../.github/workflows/fri-gpu-prover-integration-test.yml); make sure to only
+   take it from the `FRI prover` workflows, not the old ones.
+9. Run the GPU integration test
+   [workflow](https://github.com/matter-labs/zksync-2-dev/actions/workflows/fri-gpu-prover-integration-test.yml) against
+   the PR to verify that the GPU prover works fine with the new circuits.

diff --git a/prover/prover_fri_gateway/README.md b/prover/prover_fri_gateway/README.md
new file mode 100644
index 000000000000..bfe04e1f6511
--- /dev/null
+++ b/prover/prover_fri_gateway/README.md
@@ -0,0 +1,11 @@
+# FRI Prover Gateway
+
+The Prover Gateway is a service component in our system infrastructure that acts as an intermediary between the prover
+and the server's HTTP API. It regularly invokes the server's HTTP API to get proof-related data, and it submits proofs.
+Its primary functions include:
+
+- **GetProofGenerationData**: This function is responsible for pulling proof generation data from the server's HTTP API.
+  It obtains the necessary data required to generate proofs in our system. The retrieved data is then used as input by
+  the prover for the proof generation process.
+- **SubmitProof**: Once the proof is generated by the prover, this function is used to submit the resulting proof back
+  to the server.

diff --git a/prover/prover_fri_types/README.md b/prover/prover_fri_types/README.md
new file mode 100644
index 000000000000..7485656110e7
--- /dev/null
+++ b/prover/prover_fri_types/README.md
@@ -0,0 +1,8 @@
+# FRI Prover types
+
+This lib contains types used by the FRI prover and shared among:
+
+- FRI prover
+- witness generator
+- vk and setup data generator
+- witness vector generator

diff --git a/prover/vk_setup_data_generator_server_fri/README.md b/prover/vk_setup_data_generator_server_fri/README.md
new file mode 100644
index 000000000000..1dc8b5c0fa2c
--- /dev/null
+++ b/prover/vk_setup_data_generator_server_fri/README.md
@@ -0,0 +1,22 @@
+# Setup data and VK generator and server
+
+The SNARK VK generation requires the `CRS_FILE` environment variable to be present and point to the correct file. The
+file can be downloaded from the following
+[link](https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^26.key); it is also present at
+`keys/setup/setup_2^26.key` after `zk init`.
+
+## Generating setup data for a specific circuit type
+
+`zk f cargo +nightly-2023-07-21 run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer`
+
+## Generating GPU setup data for a specific circuit type
+
+`zk f cargo +nightly-2023-07-21 run --features "gpu" --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer`
+
+## Generating VKs
+
+`cargo +nightly-2023-07-21 run --release --bin zksync_vk_generator_fri`
+
+## Generating the VK commitment for existing VKs
+
+`cargo +nightly-2023-07-21 run --release --bin zksync_commitment_generator_fri`

diff --git a/prover/witness_generator/README.md b/prover/witness_generator/README.md
new file mode 100644
index 000000000000..9d35fe7e054a
--- /dev/null
+++ b/prover/witness_generator/README.md
@@ -0,0 +1,52 @@
+# WitnessGenerator
+
+Please read this
+[doc](https://www.notion.so/matterlabs/Draft-FRI-Prover-Integration-Prover-Shadowing-c4b1373786eb43779a93118be4be5d99)
+for the rationale of this binary, alongside the existing one in zk-core.
+
+The component is responsible for generating prover jobs and saving the artifacts needed for the next round of proof
+aggregation. That is, every aggregation round needs two sets of input:
+
+- computed proofs from the previous round
+- some artifacts that the witness generator of previous round(s) returns.
There are four rounds of proofs for every
+  block, each of which starts with an invocation of `{Round}WitnessGenerator` with a corresponding
+  `WitnessGeneratorJobType`:
+
+## BasicCircuitsWitnessGenerator
+
+- generates basic circuits (circuits like `Main VM` - up to 50 \* 48 = 2400 circuits)
+- input table: `basic_circuit_witness_jobs` (todo SMA-1362: will be renamed from `witness_inputs`)
+- artifact/output table: `leaf_aggregation_jobs` (also creates job stubs in `node_aggregation_jobs` and
+  `scheduler_aggregation_jobs`)
+- value in `aggregation_round` field of `prover_jobs` table: 0
+
+## LeafAggregationWitnessGenerator
+
+- generates leaf aggregation circuits (up to 48 circuits of type `LeafAggregation`)
+- input table: `leaf_aggregation_jobs`
+- artifact/output table: `node_aggregation_jobs`
+- value in `aggregation_round` field of `prover_jobs` table: 1
+
+## NodeAggregationWitnessGenerator
+
+- generates one circuit of type `NodeAggregation`
+- input table: `leaf_aggregation_jobs`
+- value in `aggregation_round` field of `prover_jobs` table: 2
+
+## SchedulerWitnessGenerator
+
+- generates one circuit of type `Scheduler`
+- input table: `scheduler_witness_jobs`
+- value in `aggregation_round` field of `prover_jobs` table: 3
+
+One round of prover generation consists of:
+
+- `WitnessGenerator` picks up the next `queued` job in its input table and processes it (invoking the corresponding
+  helper function in the `zkevm_test_harness` repo)
+- it saves the generated circuits to the `prover_jobs` table and the other artifacts to its output table
+- the individual proofs are picked up by the provers, processed, and marked as complete.
+- when the last proof for this round is computed, the prover updates the row in the output table, setting its status to
+  `queued`
+- `WitnessGenerator` picks up such a job and proceeds to the next round
+
+Note that the very first input table (`witness_inputs`) is populated by the tree (as the input artifacts for
+`WitnessGeneratorJobType::BasicCircuits` are the Merkle proofs).

diff --git a/prover/witness_vector_generator/README.md b/prover/witness_vector_generator/README.md
new file mode 100644
index 000000000000..8c4328afe8cf
--- /dev/null
+++ b/prover/witness_vector_generator/README.md
@@ -0,0 +1,7 @@
+# Witness vector generator
+
+Used to generate witness vectors from circuits and send them to the prover over TCP.
+
+## Running
+
+`zk f cargo +nightly-2023-07-21 run --release --bin zksync_witness_vector_generator`

From 7dfbc5eddab94cd24f96912e0d43ba36e1cf363f Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Tue, 3 Oct 2023 17:39:17 +0300
Subject: [PATCH 12/18] feat: Implement dynamic L2-to-L1 log tree depth (#126)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What ❔

Implements dynamic depth of the in-memory L2-to-L1 log Merkle tree. Previously, this tree always had 512 items (if
necessary, additional zero items were added at the end). With these changes, the tree has *at least* 512 items (with
padding); the actual number of items is `max(512, items.len().next_power_of_two())`. This makes the change
backward-compatible without needing any logic tied to L1 batch number etc.

## Why ❔

We want to allow larger Merkle tree depths than previously.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
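To make the compatibility claim concrete, here is a minimal usage sketch, assuming the constructor signature shown in the diff below (leaf values and counts are arbitrary):

```rust
use zksync_mini_merkle_tree::MiniMerkleTree;

fn main() {
    // 15_000 leaves exceed the former fixed bound of 512 items, so the old
    // constructor would have panicked; the dynamic tree instead pads the
    // leaves to the next power of two (2^14 = 16_384 items).
    let leaves = (0..15_000_u32).map(|i| [i as u8; 88]);
    let dynamic_root = MiniMerkleTree::new(leaves.clone(), None).merkle_root();

    // Requesting `Some(512)` as the minimum size is a no-op here, because the
    // dynamic size already dominates it, so the roots coincide.
    let padded_root = MiniMerkleTree::new(leaves, Some(512)).merkle_root();
    assert_eq!(dynamic_root, padded_root);
}
```

This mirrors the new `merkle_proofs_are_valid_in_very_large_tree` test in the diff; for batches with fewer than 512 logs, passing `Some(512)` keeps the legacy padding and hence the legacy Merkle root.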
--- core/lib/mini_merkle_tree/benches/tree.rs | 4 +- core/lib/mini_merkle_tree/src/lib.rs | 60 ++++---- core/lib/mini_merkle_tree/src/tests.rs | 60 ++++++-- core/lib/types/src/commitment.rs | 11 +- core/lib/types/src/l2_to_l1_log.rs | 5 + .../src/api_server/web3/namespaces/zks.rs | 129 +++++++++--------- 6 files changed, 155 insertions(+), 114 deletions(-) diff --git a/core/lib/mini_merkle_tree/benches/tree.rs b/core/lib/mini_merkle_tree/benches/tree.rs index 7206b64e7c4a..a964456bfb45 100644 --- a/core/lib/mini_merkle_tree/benches/tree.rs +++ b/core/lib/mini_merkle_tree/benches/tree.rs @@ -10,7 +10,7 @@ const TREE_SIZES: &[usize] = &[32, 64, 128, 256, 512, 1_024]; fn compute_merkle_root(bencher: &mut Bencher<'_>, tree_size: usize) { let leaves = (0..tree_size).map(|i| [i as u8; 88]); - let tree = MiniMerkleTree::new(leaves, tree_size); + let tree = MiniMerkleTree::new(leaves, None); bencher.iter_batched( || tree.clone(), MiniMerkleTree::merkle_root, @@ -20,7 +20,7 @@ fn compute_merkle_root(bencher: &mut Bencher<'_>, tree_size: usize) { fn compute_merkle_path(bencher: &mut Bencher<'_>, tree_size: usize) { let leaves = (0..tree_size).map(|i| [i as u8; 88]); - let tree = MiniMerkleTree::new(leaves, tree_size); + let tree = MiniMerkleTree::new(leaves, None); bencher.iter_batched( || tree.clone(), |tree| tree.merkle_root_and_path(tree_size / 3), diff --git a/core/lib/mini_merkle_tree/src/lib.rs b/core/lib/mini_merkle_tree/src/lib.rs index a4e9552aad6f..18bb343bc701 100644 --- a/core/lib/mini_merkle_tree/src/lib.rs +++ b/core/lib/mini_merkle_tree/src/lib.rs @@ -15,8 +15,9 @@ mod tests; use zksync_basic_types::H256; use zksync_crypto::hasher::{keccak::KeccakHasher, Hasher}; -/// Maximum supported depth of Merkle trees. 10 means that the tree must have <=1,024 leaves. -const MAX_TREE_DEPTH: usize = 10; +/// Maximum supported depth of the tree. 32 corresponds to `2^32` elements in the tree, which +/// we unlikely to ever hit. +const MAX_TREE_DEPTH: usize = 32; /// In-memory Merkle tree of bounded depth (no more than 10). /// @@ -27,61 +28,61 @@ const MAX_TREE_DEPTH: usize = 10; pub struct MiniMerkleTree<'a, const LEAF_SIZE: usize> { hasher: &'a dyn HashEmptySubtree, hashes: Box<[H256]>, - tree_size: usize, + binary_tree_size: usize, } impl MiniMerkleTree<'static, LEAF_SIZE> where KeccakHasher: HashEmptySubtree, { - /// Creates a new Merkle tree from the supplied leaves. If `tree_size` is larger than the - /// number of the supplied leaves, the remaining leaves are `[0_u8; LEAF_SIZE]`. + /// Creates a new Merkle tree from the supplied leaves. If `min_tree_size` is supplied and is larger + /// than the number of the supplied leaves, the leaves are padded to `min_tree_size` with `[0_u8; LEAF_SIZE]` entries. /// The hash function used in keccak-256. /// /// # Panics /// /// Panics in the same situations as [`Self::with_hasher()`]. - pub fn new(leaves: impl Iterator, tree_size: usize) -> Self { - Self::with_hasher(&KeccakHasher, leaves, tree_size) + pub fn new( + leaves: impl Iterator, + min_tree_size: Option, + ) -> Self { + Self::with_hasher(&KeccakHasher, leaves, min_tree_size) } } impl<'a, const LEAF_SIZE: usize> MiniMerkleTree<'a, LEAF_SIZE> { - /// Creates a new Merkle tree from the supplied leaves. If `tree_size` is larger than the - /// number of the supplied leaves, the remaining leaves are `[0_u8; LEAF_SIZE]`. + /// Creates a new Merkle tree from the supplied leaves. 
If `min_tree_size` is supplied and is larger than the + /// number of the supplied leaves, the leaves are padded to `min_tree_size` with `[0_u8; LEAF_SIZE]` entries. /// /// # Panics /// /// Panics if any of the following conditions applies: /// - /// - The number of `leaves` is greater than `tree_size`. - /// - `tree_size > 1_024`. - /// - `tree_size` is not a power of 2. + /// - `min_tree_size` (if supplied) is not a power of 2. pub fn with_hasher( hasher: &'a dyn HashEmptySubtree, leaves: impl Iterator, - tree_size: usize, + min_tree_size: Option, ) -> Self { - assert!( - tree_size <= 1 << MAX_TREE_DEPTH, - "tree size must be <={}", - 1 << MAX_TREE_DEPTH - ); - assert!( - tree_size.is_power_of_two(), - "tree size must be a power of 2" - ); - let hashes: Box<[H256]> = leaves.map(|bytes| hasher.hash_bytes(&bytes)).collect(); + let mut binary_tree_size = hashes.len().next_power_of_two(); + if let Some(min_tree_size) = min_tree_size { + assert!( + min_tree_size.is_power_of_two(), + "tree size must be a power of 2" + ); + binary_tree_size = min_tree_size.max(binary_tree_size); + } assert!( - hashes.len() <= tree_size, - "tree size must be greater or equal the number of supplied leaves" + tree_depth_by_size(binary_tree_size) <= MAX_TREE_DEPTH, + "Tree contains more than {} items; this is not supported", + 1 << MAX_TREE_DEPTH ); Self { hasher, hashes, - tree_size, + binary_tree_size, } } @@ -97,7 +98,7 @@ impl<'a, const LEAF_SIZE: usize> MiniMerkleTree<'a, LEAF_SIZE> { /// Returns the root hash and the Merkle proof for a leaf with the specified 0-based `index`. pub fn merkle_root_and_path(self, index: usize) -> (H256, Vec) { - let mut merkle_path = Vec::with_capacity(MAX_TREE_DEPTH); + let mut merkle_path = vec![]; let root_hash = self.compute_merkle_root_and_path(index, Some(&mut merkle_path)); (root_hash, merkle_path) } @@ -109,7 +110,10 @@ impl<'a, const LEAF_SIZE: usize> MiniMerkleTree<'a, LEAF_SIZE> { ) -> H256 { assert!(index < self.hashes.len(), "invalid tree leaf index"); - let depth = tree_depth_by_size(self.tree_size); + let depth = tree_depth_by_size(self.binary_tree_size); + if let Some(merkle_path) = merkle_path.as_deref_mut() { + merkle_path.reserve(depth); + } let mut hashes = self.hashes; let mut level_len = hashes.len(); diff --git a/core/lib/mini_merkle_tree/src/tests.rs b/core/lib/mini_merkle_tree/src/tests.rs index f5745cf43aab..c534c87523cd 100644 --- a/core/lib/mini_merkle_tree/src/tests.rs +++ b/core/lib/mini_merkle_tree/src/tests.rs @@ -26,7 +26,7 @@ fn hash_of_empty_tree_with_single_item() { for depth in 0..=5 { let len = 1 << depth; println!("checking tree with {len} items"); - let tree = MiniMerkleTree::new(iter::once([0_u8; 88]), len); + let tree = MiniMerkleTree::new(iter::once([0_u8; 88]), Some(len)); assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(depth)); } } @@ -38,16 +38,18 @@ fn hash_of_large_empty_tree_with_multiple_items() { let leaves = iter::repeat([0_u8; 88]).take(len); let tree_size = len.next_power_of_two(); - let tree = MiniMerkleTree::new(leaves, tree_size); + let tree = MiniMerkleTree::new(leaves.clone(), Some(tree_size)); + let depth = tree_depth_by_size(tree_size); + assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(depth)); + let tree = MiniMerkleTree::new(leaves, None); let depth = tree_depth_by_size(tree_size); - assert!(depth <= MAX_TREE_DEPTH); assert_eq!(tree.merkle_root(), KeccakHasher.empty_subtree_hash(depth)); } } #[test] fn single_item_tree_snapshot() { - let tree = MiniMerkleTree::new(iter::once([1_u8; 88]), 
32); + let tree = MiniMerkleTree::new(iter::once([1_u8; 88]), Some(32)); let (root_hash, path) = tree.merkle_root_and_path(0); let expected_root_hash: H256 = @@ -70,7 +72,7 @@ fn single_item_tree_snapshot() { #[test] fn full_tree_snapshot() { let leaves = (1_u8..=32).map(|byte| [byte; 88]); - let tree = MiniMerkleTree::new(leaves, 32); + let tree = MiniMerkleTree::new(leaves, None); let (root_hash, path) = tree.merkle_root_and_path(2); let expected_root_hash: H256 = @@ -93,7 +95,7 @@ fn full_tree_snapshot() { #[test] fn partial_tree_snapshot() { let leaves = (1_u8..=50).map(|byte| [byte; 88]); - let tree = MiniMerkleTree::new(leaves.clone(), 64); + let tree = MiniMerkleTree::new(leaves.clone(), None); let (root_hash, path) = tree.merkle_root_and_path(10); let expected_root_hash: H256 = @@ -113,7 +115,7 @@ fn partial_tree_snapshot() { .map(|s| s.parse::().unwrap()); assert_eq!(path, expected_path); - let tree = MiniMerkleTree::new(leaves, 64); + let tree = MiniMerkleTree::new(leaves, None); let (root_hash, path) = tree.merkle_root_and_path(49); assert_eq!(root_hash, expected_root_hash); @@ -157,7 +159,7 @@ fn verify_merkle_proof( #[test] fn merkle_proofs_are_valid_in_small_tree() { let leaves = (1_u8..=50).map(|byte| [byte; 88]); - let tree = MiniMerkleTree::new(leaves.clone(), 64); + let tree = MiniMerkleTree::new(leaves.clone(), None); for (i, item) in leaves.enumerate() { let (merkle_root, path) = tree.clone().merkle_root_and_path(i); @@ -168,10 +170,50 @@ fn merkle_proofs_are_valid_in_small_tree() { #[test] fn merkle_proofs_are_valid_in_larger_tree() { let leaves = (1_u8..=255).map(|byte| [byte; 88]); - let tree = MiniMerkleTree::new(leaves.clone(), 512); + let tree = MiniMerkleTree::new(leaves.clone(), Some(512)); for (i, item) in leaves.enumerate() { let (merkle_root, path) = tree.clone().merkle_root_and_path(i); verify_merkle_proof(&item, i, 512, &path, merkle_root); } } + +#[test] +#[allow(clippy::cast_possible_truncation)] // truncation is intentional +fn merkle_proofs_are_valid_in_very_large_tree() { + let leaves = (1_u32..=15_000).map(|byte| [byte as u8; 88]); + + let tree = MiniMerkleTree::new(leaves.clone(), None); + for (i, item) in leaves.clone().enumerate().step_by(61) { + let (merkle_root, path) = tree.clone().merkle_root_and_path(i); + verify_merkle_proof(&item, i, 1 << 14, &path, merkle_root); + } + + let tree_with_min_size = MiniMerkleTree::new(leaves.clone(), Some(512)); + assert_eq!(tree_with_min_size.clone().merkle_root(), tree.merkle_root()); + for (i, item) in leaves.enumerate().step_by(61) { + let (merkle_root, path) = tree_with_min_size.clone().merkle_root_and_path(i); + verify_merkle_proof(&item, i, 1 << 14, &path, merkle_root); + } +} + +#[test] +fn merkle_proofs_are_valid_in_very_small_trees() { + for item_count in 1..=20 { + let leaves = (1..=item_count).map(|byte| [byte; 88]); + + let tree = MiniMerkleTree::new(leaves.clone(), None); + let item_count = usize::from(item_count).next_power_of_two(); + for (i, item) in leaves.clone().enumerate() { + let (merkle_root, path) = tree.clone().merkle_root_and_path(i); + verify_merkle_proof(&item, i, item_count, &path, merkle_root); + } + + let tree_with_min_size = MiniMerkleTree::new(leaves.clone(), Some(512)); + assert_ne!(tree_with_min_size.clone().merkle_root(), tree.merkle_root()); + for (i, item) in leaves.enumerate() { + let (merkle_root, path) = tree_with_min_size.clone().merkle_root_and_path(i); + verify_merkle_proof(&item, i, 512, &path, merkle_root); + } + } +} diff --git a/core/lib/types/src/commitment.rs 
b/core/lib/types/src/commitment.rs index 89207309f206..abc0946fa34f 100644 --- a/core/lib/types/src/commitment.rs +++ b/core/lib/types/src/commitment.rs @@ -15,7 +15,6 @@ use zksync_mini_merkle_tree::MiniMerkleTree; use crate::{ block::L1BatchHeader, - circuit::GEOMETRY_CONFIG, ethabi::Token, l2_to_l1_log::L2ToL1Log, web3::signing::keccak256, @@ -27,8 +26,6 @@ use crate::{ pub trait SerializeCommitment { /// Size of the structure in bytes. const SERIALIZED_SIZE: usize; - /// The number of objects of this type that can be included in a single L1 batch. - const LIMIT_PER_L1_BATCH: usize; /// Serializes this struct into the provided buffer, which is guaranteed to have byte length /// [`Self::SERIALIZED_SIZE`]. fn serialize_commitment(&self, buffer: &mut [u8]); @@ -167,7 +164,6 @@ impl L1BatchWithMetadata { impl SerializeCommitment for L2ToL1Log { const SERIALIZED_SIZE: usize = 88; - const LIMIT_PER_L1_BATCH: usize = GEOMETRY_CONFIG.limit_for_l1_messages_merklizer as usize; fn serialize_commitment(&self, buffer: &mut [u8]) { buffer[0] = self.shard_id; @@ -181,8 +177,6 @@ impl SerializeCommitment for L2ToL1Log { impl SerializeCommitment for InitialStorageWrite { const SERIALIZED_SIZE: usize = 64; - const LIMIT_PER_L1_BATCH: usize = - GEOMETRY_CONFIG.limit_for_initial_writes_pubdata_hasher as usize; fn serialize_commitment(&self, buffer: &mut [u8]) { self.key.to_little_endian(&mut buffer[0..32]); @@ -192,8 +186,6 @@ impl SerializeCommitment for InitialStorageWrite { impl SerializeCommitment for RepeatedStorageWrite { const SERIALIZED_SIZE: usize = 40; - const LIMIT_PER_L1_BATCH: usize = - GEOMETRY_CONFIG.limit_for_repeated_writes_pubdata_hasher as usize; fn serialize_commitment(&self, buffer: &mut [u8]) { buffer[..8].copy_from_slice(&self.index.to_be_bytes()); @@ -238,8 +230,9 @@ impl L1BatchAuxiliaryOutput { .chunks(L2ToL1Log::SERIALIZED_SIZE) .map(|chunk| <[u8; L2ToL1Log::SERIALIZED_SIZE]>::try_from(chunk).unwrap()); // ^ Skip first 4 bytes of the serialized logs (i.e., the number of logs). + let min_tree_size = Some(L2ToL1Log::LEGACY_LIMIT_PER_L1_BATCH); let l2_l1_logs_merkle_root = - MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_L1_BATCH).merkle_root(); + MiniMerkleTree::new(merkle_tree_leaves, min_tree_size).merkle_root(); Self { l2_l1_logs_compressed, diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs index 57338c4766c3..8ad01b6f272f 100644 --- a/core/lib/types/src/l2_to_l1_log.rs +++ b/core/lib/types/src/l2_to_l1_log.rs @@ -15,6 +15,11 @@ pub struct L2ToL1Log { } impl L2ToL1Log { + /// Legacy upper bound of L2-to-L1 logs per single L1 batch. This is not used as a limit now, + /// but still determines the minimum number of items in the Merkle tree built from L2-to-L1 logs + /// for a certain batch. 
+ pub const LEGACY_LIMIT_PER_L1_BATCH: usize = 512; + pub fn from_slice(data: &[u8]) -> Self { assert_eq!(data.len(), Self::SERIALIZED_SIZE); Self { diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 54e58187afea..a9a8ee435481 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -1,6 +1,7 @@ use std::{collections::HashMap, convert::TryInto, time::Instant}; use bigdecimal::{BigDecimal, Zero}; +use zksync_dal::StorageProcessor; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_types::{ @@ -8,7 +9,6 @@ use zksync_types::{ BlockDetails, BridgeAddresses, GetLogsFilter, L1BatchDetails, L2ToL1LogProof, ProtocolVersion, TransactionDetails, }, - commitment::SerializeCommitment, fee::Fee, l1::L1Tx, l2::L2Tx, @@ -266,15 +266,9 @@ impl ZksNamespace { .map_err(|err| internal_error(METHOD_NAME, err))? .expect("L1 batch should contain at least one miniblock"); - let all_l1_logs_in_batch = storage - .blocks_web3_dal() - .get_l2_to_l1_logs(l1_batch_number) - .await - .map_err(|err| internal_error(METHOD_NAME, err))?; - // Position of l1 log in L1 batch relative to logs with identical data let l1_log_relative_position = if let Some(l2_log_position) = l2_log_position { - let pos = storage + let logs = storage .events_web3_dal() .get_logs( GetLogsFilter { @@ -286,48 +280,69 @@ impl ZksNamespace { self.state.api_config.req_entities_limit, ) .await - .map_err(|err| internal_error(METHOD_NAME, err))? - .iter() - .position(|event| { - event.block_number == Some(block_number.0.into()) - && event.log_index == Some(l2_log_position.into()) - }); - match pos { + .map_err(|err| internal_error(METHOD_NAME, err))?; + let maybe_pos = logs.iter().position(|event| { + event.block_number == Some(block_number.0.into()) + && event.log_index == Some(l2_log_position.into()) + }); + match maybe_pos { Some(pos) => pos, - None => { - return Ok(None); - } + None => return Ok(None), } } else { 0 }; - let l1_log_index = match all_l1_logs_in_batch + let log_proof = self + .get_l2_to_l1_log_proof_inner( + METHOD_NAME, + &mut storage, + l1_batch_number, + l1_log_relative_position, + |log| { + log.sender == L1_MESSENGER_ADDRESS + && log.key == address_to_h256(&sender) + && log.value == msg + }, + ) + .await?; + + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); + Ok(log_proof) + } + + async fn get_l2_to_l1_log_proof_inner( + &self, + method_name: &'static str, + storage: &mut StorageProcessor<'_>, + l1_batch_number: L1BatchNumber, + index_in_filtered_logs: usize, + log_filter: impl Fn(&L2ToL1Log) -> bool, + ) -> Result, Web3Error> { + let all_l1_logs_in_batch = storage + .blocks_web3_dal() + .get_l2_to_l1_logs(l1_batch_number) + .await + .map_err(|err| internal_error(method_name, err))?; + + let Some((l1_log_index, _)) = all_l1_logs_in_batch .iter() .enumerate() - .filter(|(_, log)| { - log.sender == L1_MESSENGER_ADDRESS - && log.key == address_to_h256(&sender) - && log.value == msg - }) - .nth(l1_log_relative_position) - { - Some(nth_elem) => nth_elem.0, - None => { - return Ok(None); - } + .filter(|(_, log)| log_filter(log)) + .nth(index_in_filtered_logs) + else { + return Ok(None); }; let merkle_tree_leaves = all_l1_logs_in_batch.iter().map(L2ToL1Log::to_bytes); - let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_L1_BATCH) + let min_tree_size = Some(L2ToL1Log::LEGACY_LIMIT_PER_L1_BATCH); + let 
(root, proof) = MiniMerkleTree::new(merkle_tree_leaves, min_tree_size) .merkle_root_and_path(l1_log_index); - let msg_proof = L2ToL1LogProof { + Ok(Some(L2ToL1LogProof { proof, root, id: l1_log_index as u32, - }; - metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); - Ok(Some(msg_proof)) + })) } #[tracing::instrument(skip(self))] @@ -345,45 +360,27 @@ impl ZksNamespace { .access_storage_tagged("api") .await .unwrap(); - let (l1_batch_number, l1_batch_tx_index) = match storage + let Some((l1_batch_number, l1_batch_tx_index)) = storage .blocks_web3_dal() .get_l1_batch_info_for_tx(tx_hash) .await .map_err(|err| internal_error(METHOD_NAME, err))? - { - Some(x) => x, - None => return Ok(None), - }; - - let all_l1_logs_in_batch = storage - .blocks_web3_dal() - .get_l2_to_l1_logs(l1_batch_number) - .await - .map_err(|err| internal_error(METHOD_NAME, err))?; - - let l1_log_index = match all_l1_logs_in_batch - .iter() - .enumerate() - .filter(|(_, log)| log.tx_number_in_block == l1_batch_tx_index) - .nth(index.unwrap_or(0)) - { - Some(nth_elem) => nth_elem.0, - None => { - return Ok(None); - } + else { + return Ok(None); }; - let merkle_tree_leaves = all_l1_logs_in_batch.iter().map(L2ToL1Log::to_bytes); - let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_L1_BATCH) - .merkle_root_and_path(l1_log_index); - let msg_proof = L2ToL1LogProof { - proof, - root, - id: l1_log_index as u32, - }; + let log_proof = self + .get_l2_to_l1_log_proof_inner( + METHOD_NAME, + &mut storage, + l1_batch_number, + index.unwrap_or(0), + |log| log.tx_number_in_block == l1_batch_tx_index, + ) + .await?; metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); - Ok(Some(msg_proof)) + Ok(log_proof) } #[tracing::instrument(skip(self))] From 7d017f431b886d7cbd190f13e8874f5032128185 Mon Sep 17 00:00:00 2001 From: Shahar Kaminsky Date: Tue, 3 Oct 2023 18:43:29 +0300 Subject: [PATCH 13/18] chore(codeowners): Update Owner Team (#136) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ This PR replaces the core team in codeowners with the newly created era-reviewers team. ## Why ❔ To tighten who can approve PRs and not spam the rest. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index 981a2db39116..8cde1cc1ade7 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,4 +1,4 @@ -* @matter-labs/core +* @matter-labs/era-reviewers .github/release-please/** @RomanBrodetski @perekopskiy @Deniallugo @popzxc **/CHANGELOG.md @RomanBrodetski @perekopskiy @Deniallugo @popzxc CODEOWNERS @RomanBrodetski @perekopskiy @Deniallugo @popzxc From aa60ecd1eb0cd54fe7955f57419729ef0cdc1831 Mon Sep 17 00:00:00 2001 From: Maksym Date: Tue, 3 Oct 2023 22:05:17 +0300 Subject: [PATCH 14/18] ci: switch default rust to nightly for prover builds (#139) Switch default rust target to nightly for prover builds. 
--- .github/workflows/build-prover-template.yml | 2 +- .github/workflows/ci-prover-reusable.yml | 34 +++++---------------- bin/ci_run | 4 +-- docker-compose-runner-nightly.yml | 17 +++++++++++ 4 files changed, 28 insertions(+), 29 deletions(-) create mode 100644 docker-compose-runner-nightly.yml diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index f084bb3c382b..b3a0c262503e 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -46,6 +46,7 @@ jobs: env: image_tag: ${{ inputs.image_tag }} IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} + RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" runs-on: [matterlabs-ci-runner] needs: [era-bellman-cuda] strategy: @@ -126,7 +127,6 @@ jobs: if [ "$underscored_name" == "prover_gpu_fri" ]; then underscored_name="prover_fri" fi - ci_run rustup default nightly-2023-07-21 ci_run echo [workspace] > Cargo.toml ci_run echo members = [\"prover/${underscored_name}\"] >> Cargo.toml ci_run cp prover/Cargo.lock Cargo.lock diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index e51ec94a81c3..0626457761ed 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -5,51 +5,33 @@ on: jobs: unit-tests: runs-on: [matterlabs-ci-runner] + env: + RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" steps: - - name: Prepare environment - run: | - sudo apt update && sudo apt install -y \ - pkg-config libclang-dev build-essential lldb lld \ - clang openssl libssl-dev gcc g++ pkg-config libclang-dev curl wget - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3 with: submodules: "recursive" - - name: Use Node.js 18.18.0 - uses: actions/setup-node@v2 - with: - node-version: '18.18.0' - - - name: Install Rust nightly-2023-07-21 - uses: actions-rs/toolchain@v1 - with: - toolchain: nightly-2023-07-21 - override: true - - name: Setup environment run: | echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env - npm install -g yarn - cargo install sqlx-cli --version 0.5.13 - name: Start services run: | - docker-compose -f docker-compose-runner.yml pull - docker-compose -f docker-compose-runner.yml up --build -d zk + docker-compose -f ${RUNNER_COMPOSE_FILE} pull + docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d geth zk postgres ci_run sccache --start-server - name: Init run: | - zk - zk config compile - zk db setup + ci_run zk + ci_run zk config compile + ci_run zk db setup - name: Prover unit tests run: | - cd prover # Not all tests are enabled since, prover and setup_key_generator_and_server requires bellman-cuda to be present - zk f cargo +nightly-2023-07-21 test --release -p zksync_witness_generator -p vk_setup_data_generator_server_fri -p zksync_prover_fri -p zksync_witness_vector_generator -p zksync_prover_fri_utils + ci_run bash -c "cd prover && zk f cargo test --release -p zksync_witness_generator -p vk_setup_data_generator_server_fri -p zksync_prover_fri -p zksync_witness_vector_generator -p zksync_prover_fri_utils" diff --git a/bin/ci_run b/bin/ci_run index b76fce10ac70..0f578106f467 100755 --- a/bin/ci_run +++ b/bin/ci_run @@ -2,5 +2,5 @@ # Runs the command from within CI docker-compose environment. 
cd $ZKSYNC_HOME - -docker-compose -f docker-compose-runner.yml exec -T zk $@ +compose_file="${RUNNER_COMPOSE_FILE:-docker-compose-runner.yml}" +docker-compose -f $compose_file exec -T zk $@ diff --git a/docker-compose-runner-nightly.yml b/docker-compose-runner-nightly.yml new file mode 100644 index 000000000000..2d60a0325f63 --- /dev/null +++ b/docker-compose-runner-nightly.yml @@ -0,0 +1,17 @@ +version: '3.2' +services: + zk: + image: matterlabs/zksync_rust:nightly + extends: + file: docker-compose-runner.yml + service: zk + + postgres: + extends: + file: docker-compose-runner.yml + service: postgres + + geth: + extends: + file: docker-compose-runner.yml + service: geth From 4e2b011d13d913a667f08ea314b1d088205e08c9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 21:37:07 +0200 Subject: [PATCH 15/18] chore(main): release core 15.1.0 (#148) :robot: I have created a release *beep* *boop* --- ## [15.1.0](https://github.com/matter-labs/zksync-era/compare/core-v15.0.2...core-v15.1.0) (2023-10-03) ### Features * Implement dynamic L2-to-L1 log tree depth ([#126](https://github.com/matter-labs/zksync-era/issues/126)) ([7dfbc5e](https://github.com/matter-labs/zksync-era/commit/7dfbc5eddab94cd24f96912e0d43ba36e1cf363f)) * **vm:** Introduce new way of returning from the tracer [#2569](https://github.com/matter-labs/zksync-era/issues/2569) ([#116](https://github.com/matter-labs/zksync-era/issues/116)) ([cf44a49](https://github.com/matter-labs/zksync-era/commit/cf44a491a324199b4cf457d28658da44b6dafc61)) * **vm:** Restore system-constants-generator ([#115](https://github.com/matter-labs/zksync-era/issues/115)) ([5e61bdc](https://github.com/matter-labs/zksync-era/commit/5e61bdc75b2baa03004d4d3e801170c094766964)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Danil --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index d3fd86e709c3..995633511099 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,6 +1,6 @@ { "sdk/zksync-web3.js": "0.15.4", "sdk/zksync-rs": "0.4.0", - "core": "15.0.2", + "core": "15.1.0", "prover": "7.1.1" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 1b8d1857453b..36060639819d 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## [15.1.0](https://github.com/matter-labs/zksync-era/compare/core-v15.0.2...core-v15.1.0) (2023-10-03) + + +### Features + +* Implement dynamic L2-to-L1 log tree depth ([#126](https://github.com/matter-labs/zksync-era/issues/126)) ([7dfbc5e](https://github.com/matter-labs/zksync-era/commit/7dfbc5eddab94cd24f96912e0d43ba36e1cf363f)) +* **vm:** Introduce new way of returning from the tracer [#2569](https://github.com/matter-labs/zksync-era/issues/2569) ([#116](https://github.com/matter-labs/zksync-era/issues/116)) ([cf44a49](https://github.com/matter-labs/zksync-era/commit/cf44a491a324199b4cf457d28658da44b6dafc61)) +* **vm:** Restore system-constants-generator ([#115](https://github.com/matter-labs/zksync-era/issues/115)) ([5e61bdc](https://github.com/matter-labs/zksync-era/commit/5e61bdc75b2baa03004d4d3e801170c094766964)) + ## [15.0.1](https://github.com/matter-labs/zksync-2-dev/compare/core-v15.0.0...core-v15.0.1) (2023-09-27) From 2db848998c3428664d140f1b086ada0e6f255edd Mon Sep 17 00:00:00 2001 From: agolajko <57454127+agolajko@users.noreply.github.com> Date: Wed, 4 Oct 2023 10:58:47 +0100 Subject: [PATCH 16/18] fix(hyperchain_wizard): clean up init-hyperchain (#127) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit feat: hyperchain wizard - cleaned up the `init` function - added `required` for empty information inputs - validated the private keys and addresses - added a check that the deployer and governor keys are not the same --- infrastructure/zk/src/env.ts | 2 +- infrastructure/zk/src/hyperchain_wizard.ts | 227 +++++++++++---------- infrastructure/zk/src/init.ts | 74 ++++++- infrastructure/zk/src/run/run.ts | 4 + 4 files changed, 187 insertions(+), 120 deletions(-) diff --git a/infrastructure/zk/src/env.ts b/infrastructure/zk/src/env.ts index cda73ddf6f5a..9b6eb443636d 100644 --- a/infrastructure/zk/src/env.ts +++ b/infrastructure/zk/src/env.ts @@ -57,7 +57,7 @@ export function set(env: string, print: boolean = false) { const envFile = (process.env.ENV_FILE = `etc/env/${env}.env`); if (!fs.existsSync(envFile)) { // No .env file found - we should compile it!
- config.compileConfig(); + config.compileConfig(env); } reload(); get(print); diff --git a/infrastructure/zk/src/hyperchain_wizard.ts b/infrastructure/zk/src/hyperchain_wizard.ts index f588084ee0d1..40f692137eb2 100644 --- a/infrastructure/zk/src/hyperchain_wizard.ts +++ b/infrastructure/zk/src/hyperchain_wizard.ts @@ -1,14 +1,10 @@ import { Command } from 'commander'; import enquirer from 'enquirer'; -import { BigNumber, ethers } from 'ethers'; +import { BigNumber, ethers, utils } from 'ethers'; import chalk from 'chalk'; -import { announced, submoduleUpdate } from './init'; +import { announced, init, InitArgs } from './init'; import * as server from './server'; -import * as contract from './contract'; -import * as run from './run/run'; -import * as compiler from './compiler'; import * as db from './database'; -import { clean } from './clean'; import * as env from './env'; import { compileConfig } from './config'; import * as fs from 'fs'; @@ -38,34 +34,33 @@ interface BasePromptOptions { skip?: ((state: object) => boolean | Promise) | boolean; } -// An init command that allows configuring and spinning up a new Hyperchain network +// An init command that allows configuring and spinning up a new Hyperchain network. async function initHyperchain() { await announced('Initializing Hyperchain creation', setupConfiguration()); - await announced('Drop postgres db', db.drop()); - await announced('Setup postgres db', db.setup()); - await announced('Clean rocksdb', clean('db')); - await announced('Clean backups', clean('backups')); - await announced('Building L1 and L2 contracts', contract.build()); - - await announced('Deploy test tokens', initializeTestERC20s()); - await announced('Deploying L1 verifier', contract.deployVerifier([])); - await announced('Running server genesis setup', server.genesisFromSources()); - const deployerPrivateKey = process.env.DEPLOYER_PRIVATE_KEY; const governorPrivateKey = process.env.GOVERNOR_PRIVATE_KEY; const governorAddress = process.env.GOVERNOR_ADDRESS; + const deployL2Weth = Boolean(process.env.DEPLOY_L2_WETH || false); + const deployTestTokens = Boolean(process.env.DEPLOY_TEST_TOKENS || false); + + const initArgs: InitArgs = { + skipSubmodulesCheckout: false, + skipEnvSetup: false, + deployerL1ContractInputArgs: ['--private-key', deployerPrivateKey, '--governor-address', governorAddress], + governorPrivateKeyArgs: ['--private-key', governorPrivateKey], + deployerL2ContractInput: { + args: ['--private-key', deployerPrivateKey], + includePaymaster: false, + includeL2WETH: deployL2Weth + }, + testTokens: { + deploy: deployTestTokens, + args: ['--private-key', deployerPrivateKey, '--envFile', process.env.CHAIN_ETH_NETWORK!] 
+ } + }; - await announced( - 'Deploying L1 contracts', - contract.redeployL1(['--private-key', deployerPrivateKey, '--governor-address', governorAddress]) - ); - - await announced('Initializing validator', contract.initializeValidator(['--private-key', governorPrivateKey])); - await announced('Initialize L1 allow list', contract.initializeL1AllowList(['--private-key', governorPrivateKey])); - await announced('Deploying L2 contracts', contract.deployL2(['--private-key', deployerPrivateKey], false)); - - await announced('Initialize WETH Token', initializeWethTokenForHyperchain()); + await init(initArgs); env.mergeInitToEnv(); @@ -89,7 +84,7 @@ async function setupConfiguration() { const results: any = await enquirer.prompt(questions); if (results.config === CONFIGURE) { - await announced('Setting Hyperchain metadata', setHyperchainMetadata()); + await announced('Setting Hyperchain configuration', setHyperchainMetadata()); await announced('Validating information and balances to deploy Hyperchain', checkReadinessToDeploy()); } else { const envs = env.getAvailableEnvsFromFiles(); @@ -106,10 +101,6 @@ async function setupConfiguration() { const envResults: any = await enquirer.prompt(envQuestions); env.set(envResults.env); } - await announced('Checkout system-contracts submodule', submoduleUpdate()); - await announced('Compiling JS packages', run.yarn()); - await announced('Compiling system contracts', compiler.compileSystemContracts()); - await announced('Compile l2 contracts', compiler.compileAll()); } async function setHyperchainMetadata() { @@ -124,19 +115,22 @@ async function setHyperchainMetadata() { const INSERT_KEYS = 'Insert keys'; const questions: BasePromptOptions[] = [ { - message: 'What is your hyperchain name?', + message: 'What is your Hyperchain name?', name: 'chainName', - type: 'input' + type: 'input', + required: true }, { - message: 'What is your hyperchain id? Make sure this is not used by other chains.', + message: 'What is your Hyperchain id? 
Make sure this is not used by other chains.', name: 'chainId', - type: 'input' + type: 'numeral', + required: true }, { - message: 'To which L1 Network will your hyperchain rollup to?', + message: 'To which L1 Network will your Hyperchain roll up?', name: 'l1Chain', type: 'select', + required: true, choices: BASE_NETWORKS } ]; @@ -146,20 +140,25 @@ async function setHyperchainMetadata() { let deployer, governor, ethOperator, feeReceiver: ethers.Wallet | undefined; let feeReceiverAddress, l1Rpc, l1Id; + await initializeTestERC20s(); + await initializeWethTokenForHyperchain(); + if (results.l1Chain !== BaseNetwork.LOCALHOST) { const rpcQuestions: BasePromptOptions[] = [ { message: 'What is the RPC url for the L1 Network?', name: 'l1Rpc', - type: 'input' + type: 'input', + required: true } ]; if (results.l1Chain === BaseNetwork.LOCALHOST_CUSTOM) { rpcQuestions.push({ - message: 'What is netowrk id of your L1 Network?', + message: 'What is the network id of your L1 Network?', name: 'l1NetworkId', - type: 'input' + type: 'numeral', + required: true }); } @@ -192,27 +191,48 @@ { message: 'Private key of the L1 Deployer (the one that deploys the contracts)', name: 'deployerKey', - type: 'password' + type: 'password', + required: true }, { message: 'Private key of the L1 Governor (the one that can upgrade the contracts)', name: 'governorKey', - type: 'password' + type: 'password', + required: true }, { message: 'Private key of the L1 ETH Operator (the one that rolls up the batches)', name: 'ethOperator', - type: 'password' + type: 'password', + required: true }, { message: 'Address of L2 fee receiver (the one that collects fees)', name: 'feeReceiver', - type: 'input' + type: 'input', + required: true } ]; const keyResults: any = await enquirer.prompt(keyQuestions); + if (!utils.isHexString(keyResults.deployerKey, 32)) { + throw Error(error('Deployer private key is not a valid private key')); + } + if (!utils.isHexString(keyResults.governorKey, 32)) { + throw Error(error('Governor private key is not a valid private key')); + } + if (!utils.isHexString(keyResults.ethOperator, 32)) { + throw Error(error('ETH Operator private key is not a valid private key')); + } + if (!utils.isAddress(keyResults.feeReceiver)) { + throw Error(error('Fee Receiver address is not a valid address')); + } + + if (keyResults.deployerKey == keyResults.governorKey) { + throw Error(error('Governor and Deployer should not be the same')); + } + deployer = new ethers.Wallet(keyResults.deployerKey); governor = new ethers.Wallet(keyResults.governorKey); ethOperator = new ethers.Wallet(keyResults.ethOperator); @@ -252,10 +272,6 @@ async function setHyperchainMetadata() { ) ); - if (governor.address == deployer.address) { - throw Error(error('Governor and Deployer cannot be the same')); - } - if (results.l1Chain !== BaseNetwork.LOCALHOST_CUSTOM && results.l1Chain !== BaseNetwork.LOCALHOST) { const verifyQuestions: BasePromptOptions[] = [ { @@ -272,7 +288,8 @@ { message: 'Please provide your Etherscan API Key.', name: 'etherscanKey', - type: 'input' + type: 'input', + required: true } ]; @@ -302,8 +319,8 @@ wrapEnvModify('FEE_RECEIVER_PRIVATE_KEY', feeReceiver.privateKey); } - // For now force delay to 20 seconds to ensure batch execution doesn't not happen in same block as batch proving - // This bug will be fixed on the smart contract soon + // For now force delay to 20 seconds to ensure batch execution doesn't happen in the same block as batch proving.
+ // This bug will be fixed on the smart contract soon. wrapEnvModify('CONTRACTS_VALIDATOR_TIMELOCK_EXECUTION_DELAY', '0'); wrapEnvModify('ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS', '20'); @@ -328,21 +345,12 @@ async function initializeTestERC20s() { const results: any = await enquirer.prompt(questions); if (results.deployERC20s) { - const privateKey = process.env.DEPLOYER_PRIVATE_KEY; - await announced( - 'Deploying localhost ERC20 tokens', - run.deployERC20('dev', '', '', '', [ - '--private-key', - privateKey, - '--envFile', - process.env.CHAIN_ETH_NETWORK! - ]) - ); + wrapEnvModify('DEPLOY_TEST_TOKENS', 'true'); console.log( warning( - `The addresses for the tokens can be found on the /etc/tokens/${getEnv( + `The addresses for the tokens will be available at the /etc/tokens/${getEnv( process.env.CHAIN_ETH_NETWORK! - )}.json file. The deployer address is the owner of the token contracts.` + )}.json file.` ) ); } @@ -351,7 +359,7 @@ async function initializeTestERC20s() { async function initializeWethTokenForHyperchain() { const questions: BasePromptOptions[] = [ { - message: 'Do you want to deploy a Wrapped ETH Bridge?', + message: 'Do you want to deploy Wrapped ETH to your Hyperchain?', name: 'deployWeth', type: 'confirm' } @@ -360,45 +368,44 @@ async function initializeWethTokenForHyperchain() { const results: any = await enquirer.prompt(questions); if (results.deployWeth) { - const tokens = getTokens(process.env.CHAIN_ETH_NETWORK!); - - let baseWethToken = tokens.find((token: { symbol: string }) => token.symbol == 'WETH')?.address; - - if (!baseWethToken) { - const wethQuestions = [ - { - message: 'What is the address of the Wrapped ETH on the base chain?', - name: 'l1Weth', - type: 'input' + wrapEnvModify('DEPLOY_L2_WETH', 'true'); + + if (!process.env.DEPLOY_TEST_TOKENS) { + // Only try to fetch this info if no test tokens will be deployed, otherwise WETH address will be defined later. + const tokens = getTokens(process.env.CHAIN_ETH_NETWORK!); + + let baseWethToken = tokens.find((token: { symbol: string }) => token.symbol == 'WETH')?.address; + + if (!baseWethToken) { + const wethQuestions = [ + { + message: 'What is the address of the Wrapped ETH on the base chain?', + name: 'l1Weth', + type: 'input', + required: true + } + ]; + + const wethResults: any = await enquirer.prompt(wethQuestions); + + baseWethToken = wethResults.l1Weth; + + if (fs.existsSync(`/etc/tokens/${getEnv(process.env.ZKSYNC_ENV!)}.json`)) { + tokens.push({ + name: 'Wrapped Ether', + symbol: 'WETH', + decimals: 18, + address: baseWethToken! + }); + fs.writeFileSync( + `/etc/tokens/${getEnv(process.env.ZKSYNC_ENV!)}.json`, + JSON.stringify(tokens, null, 4) + ); } - ]; - - const wethResults: any = await enquirer.prompt(wethQuestions); - - baseWethToken = wethResults.l1Weth; - - if (fs.existsSync(`/etc/tokens/${getEnv(process.env.ZKSYNC_ENV!)}.json`)) { - tokens.push({ - name: 'Wrapped Ether', - symbol: 'WETH', - decimals: 18, - address: baseWethToken! 
- }); fs.writeFileSync( - `/etc/tokens/${getEnv(process.env.ZKSYNC_ENV!)}.json`, - JSON.stringify(tokens, null, 4) - ); + } - } - - wrapEnvModify('CONTRACTS_L1_WETH_TOKEN_ADDR', baseWethToken!); - const governorPrivateKey = process.env.GOVERNOR_PRIVATE_KEY; - - await announced( - 'Initializing L2 WETH token', - contract.initializeWethToken(['--private-key', governorPrivateKey]) - ); + wrapEnvModify('CONTRACTS_L1_WETH_TOKEN_ADDR', baseWethToken!); + } } } @@ -441,12 +448,12 @@ async function startServer() { await server.server(false, false, components.join(',')); } -// The current env.modify requires to write down the variable name twice. This wraps it so the caller only writes the name and the value +// The current env.modify requires writing the variable name twice. This wraps it so the caller only writes the name and the value. function wrapEnvModify(variable: string, assignedVariable: string) { env.modify(variable, `${variable}=${assignedVariable}`); } -// Make sure all env information is available and wallets are funded +// Make sure all env information is available and wallets are funded. async function checkReadinessToDeploy() { const provider = new ethers.providers.JsonRpcProvider(process.env.ETH_CLIENT_WEB3_URL!); @@ -531,13 +538,13 @@ function getL1Name(baseChain: BaseNetwork) { function getEnv(chainName: string) { return String(chainName) - .normalize('NFKD') // split accented characters into their base characters and diacritical marks - .replace(/[\u0300-\u036f]/g, '') // remove all the accents, which happen to be all in the \u03xx UNICODE block. - .trim() // trim leading or trailing whitespace - .toLowerCase() // convert to lowercase - .replace(/[^a-z0-9 -]/g, '') // remove non-alphanumeric characters - .replace(/\s+/g, '-') // replace spaces with hyphens - .replace(/-+/g, '-'); // remove consecutive hyphens + .normalize('NFKD') // Split accented characters into their base characters and diacritical marks. + .replace(/[\u0300-\u036f]/g, '') // Remove all the accents, which happen to be all in the \u03xx UNICODE block. + .trim() // Trim leading or trailing whitespace. + .toLowerCase() // Convert to lowercase. + .replace(/[^a-z0-9 -]/g, '') // Remove non-alphanumeric characters. + .replace(/\s+/g, '-') // Replace spaces with hyphens. + .replace(/-+/g, '-'); // Remove consecutive hyphens.
} type L1Token = { @@ -561,5 +568,5 @@ export function getTokens(network: string): L1Token[] { } export const initHyperchainCommand = new Command('init-hyperchain') - .description('Initializes a new hyperchain network') + .description('Initializes a new Hyperchain network') .action(initHyperchain); diff --git a/infrastructure/zk/src/init.ts b/infrastructure/zk/src/init.ts index d44be33da13a..e2c65168461d 100644 --- a/infrastructure/zk/src/init.ts +++ b/infrastructure/zk/src/init.ts @@ -17,8 +17,17 @@ const announce = chalk.yellow; const success = chalk.green; const timestamp = chalk.grey; -export async function init(skipSubmodulesCheckout: boolean) { - if (!process.env.CI) { +export async function init(initArgs: InitArgs = DEFAULT_ARGS) { + const { + skipSubmodulesCheckout, + skipEnvSetup, + testTokens, + deployerL1ContractInputArgs, + governorPrivateKeyArgs, + deployerL2ContractInput + } = initArgs; + + if (!process.env.CI && !skipEnvSetup) { await announced('Pulling images', docker.pull()); await announced('Checking environment', checkEnv()); await announced('Checking git hooks', env.gitHooks()); @@ -28,6 +37,7 @@ export async function init(skipSubmodulesCheckout: boolean) { if (!skipSubmodulesCheckout) { await announced('Checkout system-contracts submodule', submoduleUpdate()); } + await announced('Compiling JS packages', run.yarn()); await announced('Compile l2 contracts', compiler.compileAll()); await announced('Drop postgres db', db.drop()); @@ -35,15 +45,27 @@ export async function init(skipSubmodulesCheckout: boolean) { await announced('Clean rocksdb', clean('db')); await announced('Clean backups', clean('backups')); await announced('Building contracts', contract.build()); - await announced('Deploying localhost ERC20 tokens', run.deployERC20('dev')); + if (testTokens.deploy) { + await announced('Deploying localhost ERC20 tokens', run.deployERC20('dev', '', '', '', testTokens.args)); + } await announced('Deploying L1 verifier', contract.deployVerifier([])); await announced('Reloading env', env.reload()); await announced('Running server genesis setup', server.genesisFromSources()); - await announced('Deploying L1 contracts', contract.redeployL1([])); - await announced('Initializing validator', contract.initializeValidator()); - await announced('Initialize L1 allow list', contract.initializeL1AllowList()); - await announced('Deploying L2 contracts', contract.deployL2([], true, true)); - await announced('Initializing L2 WETH token', contract.initializeWethToken()); + await announced('Deploying L1 contracts', contract.redeployL1(deployerL1ContractInputArgs)); + await announced('Initializing validator', contract.initializeValidator(governorPrivateKeyArgs)); + await announced('Initialize L1 allow list ', contract.initializeL1AllowList(governorPrivateKeyArgs)); + await announced( + 'Deploying L2 contracts', + contract.deployL2( + deployerL2ContractInput.args, + deployerL2ContractInput.includePaymaster, + deployerL2ContractInput.includeL2WETH + ) + ); + + if (deployerL2ContractInput.includeL2WETH) { + await announced('Initializing L2 WETH token', contract.initializeWethToken(governorPrivateKeyArgs)); + } } // A smaller version of `init` that "resets" the localhost environment, for which `init` was already called before. 
@@ -115,11 +137,45 @@ async function checkEnv() { } } +export interface InitArgs { + skipSubmodulesCheckout: boolean; + skipEnvSetup: boolean; + deployerL1ContractInputArgs: any[]; + governorPrivateKeyArgs: any[]; + deployerL2ContractInput: { + args: any[]; + includePaymaster: boolean; + includeL2WETH: boolean; + }; + testTokens: { + deploy: boolean; + args: any[]; + }; +} + +const DEFAULT_ARGS: InitArgs = { + skipSubmodulesCheckout: false, + skipEnvSetup: false, + deployerL1ContractInputArgs: [], + governorPrivateKeyArgs: [], + deployerL2ContractInput: { args: [], includePaymaster: true, includeL2WETH: true }, + testTokens: { deploy: true, args: [] } +}; + export const initCommand = new Command('init') .option('--skip-submodules-checkout') + .option('--skip-env-setup') .description('perform zksync network initialization for development') .action(async (cmd: Command) => { - await init(cmd.skipSubmodulesCheckout); + const initArgs: InitArgs = { + skipSubmodulesCheckout: cmd.skipSubmodulesCheckout, + skipEnvSetup: cmd.skipEnvSetup, + deployerL1ContractInputArgs: [], + governorPrivateKeyArgs: [], + deployerL2ContractInput: { args: [], includePaymaster: true, includeL2WETH: true }, + testTokens: { deploy: true, args: [] } + }; + await init(initArgs); }); export const reinitCommand = new Command('reinit') .description('"reinitializes" network. Runs faster than `init`, but requires `init` to be executed prior') diff --git a/infrastructure/zk/src/run/run.ts index e895fb23a02c..ff4092d79005 100644 --- a/infrastructure/zk/src/run/run.ts +++ b/infrastructure/zk/src/run/run.ts @@ -4,6 +4,8 @@ import { Wallet } from 'ethers'; import fs from 'fs'; import * as path from 'path'; import * as dataRestore from './data-restore'; +import { getTokens } from '../hyperchain_wizard'; +import * as env from '../env'; export { dataRestore }; @@ -39,6 +41,8 @@ export async function deployERC20( { "name": "MLTTL", "symbol": "MLTTW", "decimals": 18 }, { "name": "Wrapped Ether", "symbol": "WETH", "decimals": 18, "implementation": "WETH9"} ]' ${args.join(' ')} > ./etc/tokens/${destinationFile}.json`); + const WETH = getTokens(destinationFile).find((token) => token.symbol === 'WETH')!; + env.modify('CONTRACTS_L1_WETH_TOKEN_ADDR', `CONTRACTS_L1_WETH_TOKEN_ADDR=${WETH.address}`); } else if (command == 'new') { await utils.spawn( `yarn --silent --cwd contracts/ethereum deploy-erc20 add --token-name ${name} --symbol ${symbol} --decimals ${decimals}` From d6a1399e356b7f5b26c25092235d727816f7f6c2 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Wed, 4 Oct 2023 15:03:36 +0200 Subject: [PATCH 17/18] ci: Adds call of build-docker-from-tag.yml from release-please. (#153) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Adds a call of build-docker-from-tag.yml from release-please. Allows calling build-docker-from-tag.yml manually. ## Why ❔ To create releases, due to a GitHub issue which doesn't allow automatic workflow triggers from another workflow. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`.
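For illustration, the wiring described above looks roughly like the sketch below. This is a hedged sketch, not the actual hunk from this patch: the job layout and the `releases_created`/`tag_name` output names are assumptions that depend on how release-please is configured (manifest mode exposes per-component outputs instead).

```yaml
# Sketch: release-please.yml invoking the reusable workflow via workflow_call.
# Job names and release-please output names below are assumptions.
jobs:
  release-please:
    runs-on: ubuntu-latest
    outputs:
      tag_name: ${{ steps.release.outputs.tag_name }}
      releases_created: ${{ steps.release.outputs.releases_created }}
    steps:
      - id: release
        uses: google-github-actions/release-please-action@v3

  build-docker-from-tag:
    needs: [release-please]
    # Only build when release-please actually cut a release.
    if: ${{ needs.release-please.outputs.releases_created == 'true' }}
    # Calling the reusable workflow directly sidesteps the GITHUB_TOKEN
    # limitation: pushes made with that token do not trigger tag workflows.
    uses: ./.github/workflows/build-docker-from-tag.yml
    with:
      tag_name: ${{ needs.release-please.outputs.tag_name }}
    secrets: inherit
```

The `tag_name` input matches the one added to build-docker-from-tag.yml in the diff below, so the same workflow can serve tag pushes, manual dispatches, and release-please invocations.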
--- .github/workflows/build-docker-from-tag.yml | 21 +++++++++++++++++++-- .github/workflows/release-please.yml | 4 ++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml index 95f6e5998bc1..d499a7afaada 100644 --- a/.github/workflows/build-docker-from-tag.yml +++ b/.github/workflows/build-docker-from-tag.yml @@ -1,5 +1,17 @@ name: Build Image from tag on: + workflow_call: + inputs: + tag_name: + description: "Tag of an image to be built" + type: string + required: true + workflow_dispatch: + inputs: + tag_name: + description: "Tag of an image to be built" + type: string + required: true push: tags: - core-v** - prover-v** @@ -10,7 +22,7 @@ concurrency: docker-build jobs: setup: name: Setup - runs-on: [k8s, stage] + runs-on: [ubuntu-latest] outputs: image_tag_suffix: ${{ steps.set.outputs.image_tag_suffix }} steps: @@ -21,7 +33,12 @@ jobs: - name: Generate output with git tag id: set run: | - git_tag="${GITHUB_REF#refs/*/}" + git_tag="" + if [[ -z "${{ inputs.tag_name }}" ]]; then + git_tag="${GITHUB_REF#refs/*/}" + else + git_tag="${{ inputs.tag_name }}" + fi version=$(cut -d "-" -f2 <<< ${git_tag}) echo "image_tag_suffix=${version}" >> $GITHUB_OUTPUT diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index d9f04a40e9d4..1194864aa80b 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -3,6 +3,10 @@ on: branches: - main +permissions: + contents: write + pull-requests: write + name: release-please jobs: release-please: From 0d952d43a021c2fbf18920da3e7d770a6309d990 Mon Sep 17 00:00:00 2001 From: AnastasiiaVashchuk <72273339+AnastasiiaVashchuk@users.noreply.github.com> Date: Thu, 5 Oct 2023 12:50:02 +0300 Subject: [PATCH 18/18] fix: use gauge instead of histogram for replication lag metric (#159) --- core/lib/circuit_breaker/src/replication_lag.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/circuit_breaker/src/replication_lag.rs b/core/lib/circuit_breaker/src/replication_lag.rs index df8e886258be..244c53349cee 100644 --- a/core/lib/circuit_breaker/src/replication_lag.rs +++ b/core/lib/circuit_breaker/src/replication_lag.rs @@ -20,7 +20,7 @@ impl CircuitBreaker for ReplicationLagChecker { .get_replication_lag_sec() .await; - metrics::histogram!("circuit_breaker.replication_lag", lag as f64); + metrics::gauge!("circuit_breaker.replication_lag", lag as f64); match self.replication_lag_limit_sec { Some(replication_lag_limit_sec) if lag > replication_lag_limit_sec => Err( CircuitBreakerError::ReplicationLag(lag, replication_lag_limit_sec),