diff --git a/.aztec-sync-commit b/.aztec-sync-commit new file mode 100644 index 00000000000..c97738f7226 --- /dev/null +++ b/.aztec-sync-commit @@ -0,0 +1 @@ +9e246c1289fa40c35c4b28d2f0081dfdc2aa9d19 diff --git a/.github/ACVM_NOT_PUBLISHABLE.md b/.github/ACVM_NOT_PUBLISHABLE.md new file mode 100644 index 00000000000..e7eacb3b523 --- /dev/null +++ b/.github/ACVM_NOT_PUBLISHABLE.md @@ -0,0 +1,13 @@ +--- +title: "ACVM crates are not publishable" +assignees: TomAFrench kevaundray savio-sou +--- + + +The ACVM crates are currently unpublishable, making a release will NOT push our crates to crates.io. + +This is likely due to a crate we depend on bumping its MSRV above our own. Our lockfile is not taken into account when publishing to crates.io (as people downloading our crate don't use it) so we need to be able to use the most up to date versions of our dependencies (including transient dependencies) specified. + +Check the [MSRV check]({{env.WORKFLOW_URL}}) workflow for details. + +This issue was raised by the workflow `{{env.WORKFLOW_NAME}}` diff --git a/.github/CRATES_IO_PUBLISH_FAILED.md b/.github/CRATES_IO_PUBLISH_FAILED.md new file mode 100644 index 00000000000..ec4de319772 --- /dev/null +++ b/.github/CRATES_IO_PUBLISH_FAILED.md @@ -0,0 +1,10 @@ +--- +title: "ACVM crates failed to publish" +assignees: TomAFrench kevaundray savio-sou +--- + +The {{env.CRATE_VERSION}} release of the ACVM crates failed. + +Check the [Publish ACVM]({{env.WORKFLOW_URL}}) workflow for details. + +This issue was raised by the workflow `{{env.WORKFLOW_NAME}}` diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 112da342e10..71207793e53 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -6,7 +6,7 @@ body: attributes: value: | # Description - Thanks for taking the time to create the Issue, and welcome to the Noirot family! + Thanks for taking the time to create the Issue and welcome to the Noir community! - type: textarea id: aim attributes: @@ -38,45 +38,69 @@ body: 2. 3. 4. + - type: dropdown + id: impact + attributes: + label: Project Impact + description: How does this affect a project you or others are working on? + options: + - "Nice-to-have" + - "Blocker" + - type: textarea + id: impact_context + attributes: + label: Impact Context + description: If a nice-to-have / blocker, supplement how does this Issue affect the project. + - type: dropdown + id: workaround + attributes: + label: Workaround + description: Is there a workaround for this Issue? + options: + - "Yes" + - type: textarea + id: workaround_description + attributes: + label: Workaround Description + description: If yes, supplement how could the Issue be worked around. + - type: textarea + id: additional + attributes: + label: Additional Context + description: Supplement further information if applicable. - type: markdown attributes: value: | # Environment - Specify your versions of Noir releases used. + Specify your version of Noir tooling used. - type: markdown attributes: value: | - ## Using Nargo? + ## Nargo (CLI) - type: dropdown id: nargo-install attributes: label: Installation Method description: How did you install Nargo? - multiple: false options: - - Binary + - Binary (`noirup` default) - Compiled from source - type: input id: nargo-version attributes: label: Nargo Version - description: What is the output of the `nargo --version` command? 
- placeholder: "nargo 0.6.0 (git version hash: 0181813203a9e3e46c6d8c3169ad5d25971d4282, is dirty: false)" + description: Output of running `nargo --version` + placeholder: "nargo version = 0.23.0 noirc version = 0.23.0+5be9f9d7e2f39ca228df10e5a530474af0331704 (git version hash: 5be9f9d7e2f39ca228df10e5a530474af0331704, is dirty: false)" - type: markdown attributes: value: | - ## Using TypeScript? - Please await for our new set of packages. - You can find our target release timeframe on the [Noir Roadmap](https://github.com/orgs/noir-lang/projects/1/views/16). - - type: markdown - attributes: - value: | - # Misc - - type: textarea - id: additional + ## NoirJS (JavaScript) + - type: input + id: noirjs-version attributes: - label: Additional Context - description: Supplement further information if applicable. + label: NoirJS Version + description: Version number of `noir_js` in `package.json` + placeholder: "0.23.0" - type: markdown attributes: value: | @@ -87,11 +111,8 @@ body: label: Would you like to submit a PR for this Issue? description: Fellow contributors are happy to provide support where applicable. options: - - "No" - "Maybe" - "Yes" - validations: - required: true - type: textarea id: pr_support attributes: diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 979ac75811e..abbfe392454 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -6,7 +6,7 @@ body: attributes: value: | ## Description - Thanks for taking the time to create the Issue, and welcome to the Noirot family! + Thanks for taking the time to create the Issue and welcome to the Noir community! - type: textarea id: problem attributes: @@ -21,11 +21,31 @@ body: description: Describe how you think it should work. Supply pseudocode / step-by-step examples if applicable. validations: required: true + - type: dropdown + id: impact + attributes: + label: Project Impact + description: How does this affect a project you or others are working on? + options: + - "Nice-to-have" + - "Blocker" - type: textarea - id: alternatives + id: impact_context attributes: - label: Alternatives Considered - description: Describe less-happy cases you have considered, if any. + label: Impact Context + description: If a nice-to-have / blocker, supplement how does this Issue affect the project. + - type: dropdown + id: workaround + attributes: + label: Workaround + description: Is there a workaround for this Issue? + options: + - "Yes" + - type: textarea + id: workaround_description + attributes: + label: Workaround Description + description: If yes, supplement how could the Issue be worked around. - type: textarea id: additional attributes: @@ -42,11 +62,8 @@ body: description: Fellow contributors are happy to provide support where applicable. multiple: false options: - - "No" - "Maybe" - "Yes" - validations: - required: true - type: textarea id: pr-support attributes: diff --git a/.github/JS_PUBLISH_FAILED.md b/.github/JS_PUBLISH_FAILED.md new file mode 100644 index 00000000000..5b9f79aac1f --- /dev/null +++ b/.github/JS_PUBLISH_FAILED.md @@ -0,0 +1,11 @@ +--- +title: "JS packages failed to publish" +assignees: TomAFrench kevaundray savio-sou +labels: js +--- + +The {{env.NPM_TAG}} release of the JS packages failed. + +Check the [Publish JS packages]({{env.WORKFLOW_URL}}) workflow for details. 
+ +This issue was raised by the workflow `{{env.WORKFLOW_NAME}}` diff --git a/.github/actions/install-playwright/action.yml b/.github/actions/install-playwright/action.yml index ac412a7dd4a..be835a422cc 100644 --- a/.github/actions/install-playwright/action.yml +++ b/.github/actions/install-playwright/action.yml @@ -9,7 +9,7 @@ runs: run: echo "PLAYWRIGHT_VERSION=$(yarn workspace @noir-lang/noirc_abi info @web/test-runner-playwright --json | jq .children.Version | tr -d '"')" >> $GITHUB_ENV - name: Cache playwright binaries - uses: actions/cache@v3 + uses: actions/cache@v4 id: playwright-cache with: path: | @@ -19,6 +19,5 @@ runs: - name: Install playwright deps shell: bash if: steps.playwright-cache.outputs.cache-hit != 'true' - run: | - npx playwright install - npx playwright install-deps + run: ./.github/scripts/playwright-install.sh + diff --git a/.github/actions/nix/action.yml b/.github/actions/nix/action.yml deleted file mode 100644 index 9f008ad0f9d..00000000000 --- a/.github/actions/nix/action.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: Setup Nix -description: Installs and setups Nix components - -inputs: - github-token: - description: 'Github Access Token' - required: true - nix-cache-name: - description: 'Name of the Cachix cache to use' - required: true - cachix-auth-token: - description: 'Cachix Auth Token' - required: true - - -runs: - using: composite - steps: - - uses: cachix/install-nix-action@v22 - with: - nix_path: nixpkgs=channel:nixos-23.05 - github_access_token: ${{ inputs.github-token }} - - - uses: cachix/cachix-action@v12 - with: - name: ${{ inputs.nix-cache-name }} - authToken: ${{ inputs.cachix-auth-token }} diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index b265a63d29a..d0e83dedf67 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -7,7 +7,7 @@ runs: - uses: actions/setup-node@v4 id: node with: - node-version: 18.17.1 + node-version: 18.19.0 cache: 'yarn' cache-dependency-path: 'yarn.lock' diff --git a/.github/scripts/acvm_js-test-browser.sh b/.github/scripts/acvm_js-test-browser.sh index 598c98dadf2..34445623988 100755 --- a/.github/scripts/acvm_js-test-browser.sh +++ b/.github/scripts/acvm_js-test-browser.sh @@ -1,5 +1,5 @@ #!/bin/bash set -eu -npx playwright install && npx playwright install-deps +./.github/scripts/playwright-install.sh yarn workspace @noir-lang/acvm_js test:browser diff --git a/.github/scripts/cargo-binstall-install.sh b/.github/scripts/cargo-binstall-install.sh new file mode 100755 index 00000000000..55d90904ca3 --- /dev/null +++ b/.github/scripts/cargo-binstall-install.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +set -eu + +curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash diff --git a/.github/scripts/integration-test-browser.sh b/.github/scripts/integration-test-browser.sh new file mode 100755 index 00000000000..12195a88928 --- /dev/null +++ b/.github/scripts/integration-test-browser.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -eu + +./.github/scripts/playwright-install.sh +yarn workspace integration-tests test:browser \ No newline at end of file diff --git a/.github/scripts/integration-test-node.sh b/.github/scripts/integration-test-node.sh new file mode 100755 index 00000000000..b7f00c65620 --- /dev/null +++ b/.github/scripts/integration-test-node.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -eu + +apt-get install libc++-dev -y +yarn workspace integration-tests test:node diff --git 
a/.github/scripts/noir-js-test.sh b/.github/scripts/noir-js-test.sh index b5fe34038fe..72458d8de6a 100755 --- a/.github/scripts/noir-js-test.sh +++ b/.github/scripts/noir-js-test.sh @@ -1,6 +1,4 @@ #!/bin/bash set -eu -./scripts/nargo_compile_noir_js_assert_lt.sh -rm -rf /usr/src/noir/tooling/noir_js/test/noir_compiled_examples/assert_lt/target/debug_assert_lt.json -yarn workspace @noir-lang/noir_js test \ No newline at end of file +yarn workspace @noir-lang/noir_js test diff --git a/.github/scripts/noir-wasm-test-browser.sh b/.github/scripts/noir-wasm-test-browser.sh index 4b584abce23..189dee91289 100755 --- a/.github/scripts/noir-wasm-test-browser.sh +++ b/.github/scripts/noir-wasm-test-browser.sh @@ -1,6 +1,6 @@ #!/bin/bash set -eu -./scripts/nargo_compile_wasm_fixtures.sh -npx playwright install && npx playwright install-deps -yarn workspace @noir-lang/noir_wasm test:browser \ No newline at end of file +./.github/scripts/playwright-install.sh +yarn workspace @noir-lang/noir_wasm test:build_fixtures +yarn workspace @noir-lang/noir_wasm test:browser diff --git a/.github/scripts/noir-wasm-test.sh b/.github/scripts/noir-wasm-test.sh index 03e1bac2330..a7aa6b68de2 100755 --- a/.github/scripts/noir-wasm-test.sh +++ b/.github/scripts/noir-wasm-test.sh @@ -1,7 +1,7 @@ #!/bin/bash set -eu -./scripts/nargo_compile_wasm_fixtures.sh +yarn workspace @noir-lang/noir_wasm test:build_fixtures yarn workspace @noir-lang/noir_wasm test:node -npx playwright install && npx playwright install-deps +./.github/scripts/playwright-install.sh yarn workspace @noir-lang/noir_wasm test:browser diff --git a/.github/scripts/noirc-abi-test-browser.sh b/.github/scripts/noirc-abi-test-browser.sh index 7a966cb5e94..e03da253ebb 100755 --- a/.github/scripts/noirc-abi-test-browser.sh +++ b/.github/scripts/noirc-abi-test-browser.sh @@ -1,5 +1,5 @@ #!/bin/bash set -eu -npx playwright install && npx playwright install-deps +./.github/scripts/playwright-install.sh yarn workspace @noir-lang/noirc_abi test:browser diff --git a/.github/scripts/integration-test.sh b/.github/scripts/playwright-install.sh similarity index 52% rename from .github/scripts/integration-test.sh rename to .github/scripts/playwright-install.sh index 4e1b52cedf9..4072e996264 100755 --- a/.github/scripts/integration-test.sh +++ b/.github/scripts/playwright-install.sh @@ -1,6 +1,4 @@ #!/bin/bash set -eu -apt-get install libc++-dev -y npx playwright install && npx playwright install-deps -yarn workspace integration-tests test \ No newline at end of file diff --git a/.github/scripts/wasm-bindgen-install.sh b/.github/scripts/wasm-bindgen-install.sh index a147a46cde8..a548372ee2c 100755 --- a/.github/scripts/wasm-bindgen-install.sh +++ b/.github/scripts/wasm-bindgen-install.sh @@ -1,5 +1,13 @@ -#!/bin/bash +#!/usr/bin/env bash set -eu -# TODO call this script directly -./scripts/install_wasm-bindgen.sh +cd $(dirname "$0") + +./cargo-binstall-install.sh + +# Install wasm-bindgen-cli. +if [ "$(wasm-bindgen --version | cut -d' ' -f2)" != "0.2.86" ]; then + echo "Building wasm-bindgen..." 
+ cargo binstall wasm-bindgen-cli@0.2.86 --force --no-confirm +fi + diff --git a/.github/scripts/wasm-opt-install.sh b/.github/scripts/wasm-opt-install.sh new file mode 100755 index 00000000000..cbdeb8f2bfe --- /dev/null +++ b/.github/scripts/wasm-opt-install.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -eu + +cd $(dirname "$0") + +./cargo-binstall-install.sh + +cargo-binstall wasm-opt --version 0.116.0 -y diff --git a/.github/scripts/wasm-pack-install.sh b/.github/scripts/wasm-pack-install.sh index f9b2fe160d5..d3de47d9786 100755 --- a/.github/scripts/wasm-pack-install.sh +++ b/.github/scripts/wasm-pack-install.sh @@ -1,5 +1,8 @@ #!/bin/bash set -eu -curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash +cd $(dirname "$0") + +./cargo-binstall-install.sh + cargo-binstall wasm-pack --version 0.12.1 -y diff --git a/.github/workflows/docker-test-flow.yml b/.github/workflows/docker-test-flow.yml index 4b4a2ac2add..c8b4f53fadd 100644 --- a/.github/workflows/docker-test-flow.yml +++ b/.github/workflows/docker-test-flow.yml @@ -720,7 +720,63 @@ jobs: - name: Test working-directory: /usr/src/noir run: | - ./.github/scripts/integration-test.sh + ./.github/scripts/integration-test-node.sh + + test-integration-browser: + name: Integration test browser + runs-on: ubuntu-latest + needs: [ + build-base-js, + build-noir-wasm, + build-noirc-abi, + build-acvm_js, + build-noir-js-types, + build-noir_js, + build-barretenberg-backend + ] + container: + image: ghcr.io/noir-lang/noir:${{ github.sha }}-js + credentials: + username: ${{ github.actor }} + password: ${{ secrets.github_token }} + steps: + - name: Download noir wasm + uses: actions/download-artifact@v4 + with: + name: noir_wasm + path: /usr/src/noir/compiler/wasm + - name: Download noirc abi + uses: actions/download-artifact@v4 + with: + name: noirc_abi_wasm + path: /usr/src/noir/tooling/noirc_abi_wasm + - name: Download acvm js + uses: actions/download-artifact@v4 + with: + name: acvm_js + path: /usr/src/noir/acvm-repo/acvm_js + - name: Download noir js types + uses: actions/download-artifact@v4 + with: + name: noir-js-types + path: | + /usr/src/noir/tooling/noir_js_types/lib + - name: Download noir js + uses: actions/download-artifact@v4 + with: + name: noir_js + path: + /usr/src/noir/tooling/noir_js/lib + - name: Download Barretenberg backend + uses: actions/download-artifact@v4 + with: + name: barretenberg-backend + path: + /usr/src/noir/tooling/noir_js_backend_barretenberg/lib + - name: Test + working-directory: /usr/src/noir + run: | + ./.github/scripts/integration-test-browser.sh tests-end: name: End @@ -733,6 +789,7 @@ jobs: - test-noir-wasm - test-noir-wasm-browser - test-integration + - test-integration-browser - test-noir_codegen - test-acvm_js - test-acvm_js-browser @@ -748,4 +805,4 @@ jobs: exit 0 fi env: - FAIL: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'skipped') }} + FAIL: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }} diff --git a/.github/workflows/docs-pr.yml b/.github/workflows/docs-pr.yml index 87bec37c438..5d0b72c6ad8 100644 --- a/.github/workflows/docs-pr.yml +++ b/.github/workflows/docs-pr.yml @@ -53,9 +53,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 - + - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.71.1 + uses: dtolnay/rust-toolchain@1.73.0 - uses: Swatinem/rust-cache@v2 with: @@ -80,7 +80,7 @@ jobs: yarn 
workspaces foreach -Rpt --from docs run build - name: Upload artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: docs path: ./docs/build/ @@ -98,7 +98,7 @@ jobs: uses: actions/checkout@v4 - name: Download built docs - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: docs path: ./docs/build @@ -114,3 +114,16 @@ jobs: NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }} timeout-minutes: 1 + + add_comment: + needs: [deploy_preview] + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Tag dev rel in comment + uses: marocchino/sticky-pull-request-comment@v2 + with: + message: | + FYI @noir-lang/developerrelations on Noir doc changes. + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/formatting.yml b/.github/workflows/formatting.yml index 97736e2415e..279e90f5f6f 100644 --- a/.github/workflows/formatting.yml +++ b/.github/workflows/formatting.yml @@ -32,7 +32,7 @@ jobs: uses: actions/checkout@v4 - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.71.1 + uses: dtolnay/rust-toolchain@1.73.0 with: targets: ${{ matrix.target }} components: clippy, rustfmt @@ -63,3 +63,68 @@ jobs: - name: Run `yarn lint` run: yarn lint + + build-nargo: + runs-on: ubuntu-22.04 + timeout-minutes: 30 + + steps: + - name: Checkout Noir repo + uses: actions/checkout@v4 + + - name: Setup toolchain + uses: dtolnay/rust-toolchain@1.73.0 + + - uses: Swatinem/rust-cache@v2 + with: + key: x86_64-unknown-linux-gnu + cache-on-failure: true + save-if: ${{ github.event_name != 'merge_group' }} + + - name: Build Nargo + run: cargo build --package nargo_cli --release + + - name: Package artifacts + run: | + mkdir dist + cp ./target/release/nargo ./dist/nargo + 7z a -ttar -so -an ./dist/* | 7z a -si ./nargo-x86_64-unknown-linux-gnu.tar.gz + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: nargo + path: ./dist/* + retention-days: 3 + + nargo_fmt: + needs: [build-nargo] + name: Nargo fmt + runs-on: ubuntu-latest + timeout-minutes: 30 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Download nargo binary + uses: actions/download-artifact@v4 + with: + name: nargo + path: ./nargo + + - name: Set nargo on PATH + run: | + nargo_binary="${{ github.workspace }}/nargo/nargo" + chmod +x $nargo_binary + echo "$(dirname $nargo_binary)" >> $GITHUB_PATH + export PATH="$PATH:$(dirname $nargo_binary)" + nargo -V + + - name: Format stdlib + working-directory: ./noir_stdlib + run: nargo fmt --check + + - name: Format test suite + working-directory: ./test_programs + run: ./format.sh check diff --git a/.github/workflows/gates_report.yml b/.github/workflows/gates_report.yml index 8e3ef768828..f3f798fc5ea 100644 --- a/.github/workflows/gates_report.yml +++ b/.github/workflows/gates_report.yml @@ -18,7 +18,7 @@ jobs: uses: actions/checkout@v4 - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.71.1 + uses: dtolnay/rust-toolchain@1.73.0 - uses: Swatinem/rust-cache@v2 with: @@ -36,7 +36,7 @@ jobs: 7z a -ttar -so -an ./dist/* | 7z a -si ./nargo-x86_64-unknown-linux-gnu.tar.gz - name: Upload artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: nargo path: ./dist/* @@ -53,7 +53,7 @@ jobs: - uses: actions/checkout@v4 - name: Download nargo binary - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: nargo path: ./nargo diff --git 
a/.github/workflows/publish-acvm.yml b/.github/workflows/publish-acvm.yml index 0251aaa0377..959cd8e4bca 100644 --- a/.github/workflows/publish-acvm.yml +++ b/.github/workflows/publish-acvm.yml @@ -18,7 +18,7 @@ jobs: ref: ${{ inputs.noir-ref }} - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.71.1 + uses: dtolnay/rust-toolchain@1.73.0 # These steps are in a specific order so crate dependencies are updated first - name: Publish acir_field @@ -62,3 +62,16 @@ jobs: cargo publish --package acvm env: CARGO_REGISTRY_TOKEN: ${{ secrets.ACVM_CRATES_IO_TOKEN }} + + # Raise an issue if any package failed to publish + - name: Alert on failed publish + uses: JasonEtco/create-an-issue@v2 + if: ${{ failure() }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CRATE_VERSION: ${{ inputs.noir-ref }} + WORKFLOW_NAME: ${{ github.workflow }} + WORKFLOW_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + with: + update_existing: true + filename: .github/JS_PUBLISH_FAILED.md \ No newline at end of file diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml index 231b57550c9..a56583b34eb 100644 --- a/.github/workflows/publish-docs.yml +++ b/.github/workflows/publish-docs.yml @@ -5,6 +5,7 @@ on: branches: - master paths: [docs/**] + workflow_dispatch: jobs: publish-docs: diff --git a/.github/workflows/publish-es-packages.yml b/.github/workflows/publish-es-packages.yml index 2c825ffd45f..f72a97b2684 100644 --- a/.github/workflows/publish-es-packages.yml +++ b/.github/workflows/publish-es-packages.yml @@ -18,31 +18,34 @@ jobs: build-noirc_abi_wasm: runs-on: ubuntu-latest steps: - - name: Checkout sources + - name: Checkout Noir repo uses: actions/checkout@v4 - with: - ref: ${{ inputs.noir-ref }} - - name: Setup Nix - uses: ./.github/actions/nix + - name: Setup toolchain + uses: dtolnay/rust-toolchain@1.73.0 + + - uses: Swatinem/rust-cache@v2 with: - github-token: ${{ secrets.GITHUB_TOKEN }} - nix-cache-name: "noir" - cachix-auth-token: ${{ secrets.CACHIXAUTHTOKEN }} + key: noirc-abi + save-if: false - - name: Build wasm package - run: | - nix build -L .#noirc_abi_wasm + - name: Install Yarn dependencies + uses: ./.github/actions/setup + + - name: Install wasm-opt + run: ./.github/scripts/wasm-opt-install.sh + + - name: Build noirc_abi + run: ./.github/scripts/noirc-abi-build.sh - - uses: actions/upload-artifact@v3 + - name: Upload artifact + uses: actions/upload-artifact@v4 with: name: noirc_abi_wasm - path: | - result/noirc_abi_wasm/nodejs - result/noirc_abi_wasm/web + path: ./tooling/noirc_abi_wasm/outputs/out/noirc_abi_wasm + retention-days: 10 build-noir_wasm: - needs: [build-noirc_abi_wasm] runs-on: ubuntu-latest steps: - name: Checkout sources @@ -51,27 +54,27 @@ jobs: ref: ${{ inputs.noir-ref }} - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.71.1 + uses: dtolnay/rust-toolchain@1.73.0 - uses: Swatinem/rust-cache@v2 with: key: noir-wasm save-if: false - - name: Download noirc_abi_wasm package artifact - uses: actions/download-artifact@v3 - with: - name: noirc_abi_wasm - path: ./tooling/noirc_abi_wasm - - name: Install Yarn dependencies uses: ./.github/actions/setup + - name: Install wasm-opt + run: ./.github/scripts/wasm-opt-install.sh + + - name: Build noir_js_types + run: yarn workspace @noir-lang/types build + - name: Build noir_wasm - run: yarn workspace @noir-lang/noir_wasm build + run: ./.github/scripts/noir-wasm-build.sh - name: Upload artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 
with: name: noir_wasm path: | @@ -84,26 +87,30 @@ jobs: steps: - name: Checkout sources uses: actions/checkout@v4 - with: - ref: ${{ inputs.noir-ref }} - - name: Setup Nix - uses: ./.github/actions/nix + - name: Setup toolchain + uses: dtolnay/rust-toolchain@1.73.0 + + - uses: Swatinem/rust-cache@v2 with: - github-token: ${{ secrets.GITHUB_TOKEN }} - nix-cache-name: "noir" - cachix-auth-token: ${{ secrets.CACHIXAUTHTOKEN }} + key: acvm-js + save-if: false - - name: Build wasm package - run: | - nix build -L .#acvm_js + - name: Install Yarn dependencies + uses: ./.github/actions/setup + + - name: Install wasm-opt + run: ./.github/scripts/wasm-opt-install.sh - - uses: actions/upload-artifact@v3 + - name: Build acvm_js + run: ./.github/scripts/acvm_js-build.sh + + - name: Upload artifact + uses: actions/upload-artifact@v4 with: - name: acvm_js - path: | - result/acvm_js/nodejs - result/acvm_js/web + name: acvm-js + path: ./acvm-repo/acvm_js/outputs/out/acvm_js + retention-days: 3 publish-es-packages: runs-on: ubuntu-latest @@ -114,17 +121,17 @@ jobs: with: ref: ${{ inputs.noir-ref }} - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: - name: acvm_js + name: acvm-js path: acvm-repo/acvm_js - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: noir_wasm path: compiler/wasm - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: noirc_abi_wasm path: tooling/noirc_abi_wasm @@ -145,3 +152,16 @@ jobs: - name: Publish ES Packages run: yarn publish:all --access public --tag ${{ inputs.npm-tag }} + + # Raise an issue if any package failed to publish + - name: Alert on failed publish + uses: JasonEtco/create-an-issue@v2 + if: ${{ failure() }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NPM_TAG: ${{ inputs.npm-tag }} + WORKFLOW_NAME: ${{ github.workflow }} + WORKFLOW_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + with: + update_existing: true + filename: .github/JS_PUBLISH_FAILED.md \ No newline at end of file diff --git a/.github/workflows/publish-nargo.yml b/.github/workflows/publish-nargo.yml index fc089008657..e47e1a13053 100644 --- a/.github/workflows/publish-nargo.yml +++ b/.github/workflows/publish-nargo.yml @@ -46,7 +46,7 @@ jobs: echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx$(sw_vers -productVersion) --show-sdk-platform-version)" >> $GITHUB_ENV - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.71.1 + uses: dtolnay/rust-toolchain@1.73.0 with: targets: ${{ matrix.target }} @@ -67,7 +67,7 @@ jobs: 7z a -ttar -so -an ./dist/* | 7z a -si ./nargo-${{ matrix.target }}.tar.gz - name: Upload artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: nargo-${{ matrix.target }} path: ./dist/* @@ -120,7 +120,7 @@ jobs: ref: ${{ inputs.tag || env.GITHUB_REF }} - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.71.1 + uses: dtolnay/rust-toolchain@1.73.0 with: targets: ${{ matrix.target }} @@ -145,7 +145,7 @@ jobs: 7z a -ttar -so -an ./dist/* | 7z a -si ./nargo-${{ matrix.target }}.tar.gz - name: Upload artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: nargo-${{ matrix.target }} path: ./dist/* diff --git a/.github/workflows/pull-request-title.yml b/.github/workflows/pull-request-title.yml index 4b8a626a94e..7e9b729da28 100644 --- a/.github/workflows/pull-request-title.yml +++ b/.github/workflows/pull-request-title.yml @@ -27,3 +27,22 @@ jobs: fix feat chore + + 
force-push-comment: + name: Warn external contributors about force-pushing + runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.full_name != 'noir-lang/noir' }} + permissions: + pull-requests: write + + steps: + - name: Post comment on force pushes + uses: marocchino/sticky-pull-request-comment@v2 + with: + message: | + Thank you for your contribution to the Noir language. + + Please **do not force push to this branch** after the Noir team have started review of this PR. Doing so will only delay us merging your PR as we will need to start the review process from scratch. + + Thanks for your understanding. + \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 71a0ab6d894..83e8e479181 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -45,7 +45,7 @@ jobs: - uses: actions/setup-node@v3 with: - node-version: 18.17.1 + node-version: 18.19.0 cache: 'yarn' cache-dependency-path: 'yarn.lock' diff --git a/.github/workflows/test-js-packages.yml b/.github/workflows/test-js-packages.yml index addc9ce3d83..b3908ee5d3e 100644 --- a/.github/workflows/test-js-packages.yml +++ b/.github/workflows/test-js-packages.yml @@ -22,7 +22,7 @@ jobs: uses: actions/checkout@v4 - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.71.1 + uses: dtolnay/rust-toolchain@1.73.0 - uses: Swatinem/rust-cache@v2 with: @@ -40,52 +40,47 @@ jobs: 7z a -ttar -so -an ./dist/* | 7z a -si ./nargo-x86_64-unknown-linux-gnu.tar.gz - name: Upload artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: nargo path: ./dist/* retention-days: 3 - build-noir-wasm: - needs: [build-noirc-abi] + build-noirc-abi: runs-on: ubuntu-latest timeout-minutes: 30 steps: - - name: Checkout sources + - name: Checkout Noir repo uses: actions/checkout@v4 - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.71.1 + uses: dtolnay/rust-toolchain@1.73.0 - uses: Swatinem/rust-cache@v2 with: - key: noir-wasm + key: noirc-abi cache-on-failure: true save-if: ${{ github.event_name != 'merge_group' }} - - name: Download noirc_abi_wasm package artifact - uses: actions/download-artifact@v3 - with: - name: noirc_abi_wasm - path: ./tooling/noirc_abi_wasm - - name: Install Yarn dependencies uses: ./.github/actions/setup - - name: Build noir_wasm - run: yarn workspace @noir-lang/noir_wasm build + - name: Install wasm-opt + run: ./.github/scripts/wasm-opt-install.sh + + - name: Build noirc_abi + run: ./.github/scripts/noirc-abi-build.sh - name: Upload artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: noir_wasm - path: | - ./compiler/wasm/dist - ./compiler/wasm/build - retention-days: 3 + name: noirc_abi_wasm + path: ./tooling/noirc_abi_wasm/outputs/out/noirc_abi_wasm + retention-days: 10 - build-acvm-js: + + build-noir-wasm: runs-on: ubuntu-latest timeout-minutes: 30 @@ -93,55 +88,68 @@ jobs: - name: Checkout sources uses: actions/checkout@v4 - - name: Setup Nix - uses: ./.github/actions/nix + - name: Setup toolchain + uses: dtolnay/rust-toolchain@1.73.0 + + - uses: Swatinem/rust-cache@v2 with: - github-token: ${{ secrets.GITHUB_TOKEN }} - nix-cache-name: "noir" - cachix-auth-token: ${{ secrets.CACHIXAUTHTOKEN }} + key: noir-wasm + cache-on-failure: true + save-if: ${{ github.event_name != 'merge_group' }} - - name: Build acvm-js - run: | - nix build -L .#acvm_js + - name: Install Yarn dependencies + uses: ./.github/actions/setup + + - name: 
Install wasm-opt + run: ./.github/scripts/wasm-opt-install.sh + + - name: Build noir_js_types + run: yarn workspace @noir-lang/types build - - name: Dereference symlink - run: echo "UPLOAD_PATH=$(readlink -f result/acvm_js)" >> $GITHUB_ENV + - name: Build noir_wasm + run: ./.github/scripts/noir-wasm-build.sh - name: Upload artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: acvm-js - path: ${{ env.UPLOAD_PATH }} + name: noir_wasm + path: | + ./compiler/wasm/dist + ./compiler/wasm/build retention-days: 3 - build-noirc-abi: + build-acvm-js: runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - - name: Setup Nix - uses: ./.github/actions/nix + - name: Setup toolchain + uses: dtolnay/rust-toolchain@1.73.0 + + - uses: Swatinem/rust-cache@v2 with: - github-token: ${{ secrets.GITHUB_TOKEN }} - nix-cache-name: "noir" - cachix-auth-token: ${{ secrets.CACHIXAUTHTOKEN }} + key: acvm-js + cache-on-failure: true + save-if: ${{ github.event_name != 'merge_group' }} - - name: Build noirc_abi_wasm - run: | - nix build -L .#noirc_abi_wasm + - name: Install Yarn dependencies + uses: ./.github/actions/setup + + - name: Install wasm-opt + run: ./.github/scripts/wasm-opt-install.sh - - name: Dereference symlink - run: echo "UPLOAD_PATH=$(readlink -f ./result/noirc_abi_wasm)" >> $GITHUB_ENV + - name: Build acvm_js + run: ./.github/scripts/acvm_js-build.sh - name: Upload artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: noirc_abi_wasm - path: ${{ env.UPLOAD_PATH }} - retention-days: 10 + name: acvm-js + path: ./acvm-repo/acvm_js/outputs/out/acvm_js + retention-days: 3 test-acvm_js-node: needs: [build-acvm-js] @@ -154,7 +162,7 @@ jobs: uses: actions/checkout@v4 - name: Download artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: acvm-js path: ./acvm-repo/acvm_js @@ -176,7 +184,7 @@ jobs: uses: actions/checkout@v4 - name: Download artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: acvm-js path: ./acvm-repo/acvm_js @@ -184,10 +192,8 @@ jobs: - name: Set up test environment uses: ./.github/actions/setup - - name: Install playwright deps - run: | - npx playwright install - npx playwright install-deps + - name: Install Playwright + run: ./.github/scripts/playwright-install.sh - name: Run browser tests run: yarn workspace @noir-lang/acvm_js test:browser @@ -200,10 +206,10 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download wasm package artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: noirc_abi_wasm path: ./tooling/noirc_abi_wasm @@ -231,7 +237,7 @@ jobs: uses: actions/checkout@v4 - name: Download wasm package artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: noirc_abi_wasm path: ./tooling/noirc_abi_wasm @@ -247,7 +253,7 @@ jobs: yarn workspace @noir-lang/backend_barretenberg test test-noir-js: - needs: [build-acvm-js, build-noirc-abi] + needs: [build-nargo, build-acvm-js, build-noirc-abi] name: Noir JS runs-on: ubuntu-latest timeout-minutes: 30 @@ -256,18 +262,32 @@ jobs: - name: Checkout uses: actions/checkout@v4 + - name: Download nargo binary + uses: actions/download-artifact@v4 + with: + name: nargo + path: ./nargo + - name: Download artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 
with: name: acvm-js path: ./acvm-repo/acvm_js - name: Download wasm package artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: noirc_abi_wasm path: ./tooling/noirc_abi_wasm + - name: Set nargo on PATH + run: | + nargo_binary="${{ github.workspace }}/nargo/nargo" + chmod +x $nargo_binary + echo "$(dirname $nargo_binary)" >> $GITHUB_PATH + export PATH="$PATH:$(dirname $nargo_binary)" + nargo -V + - name: Install Yarn dependencies uses: ./.github/actions/setup @@ -293,7 +313,7 @@ jobs: uses: actions/checkout@v4 - name: Download wasm package artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: noir_wasm path: ./compiler/wasm @@ -302,7 +322,7 @@ jobs: uses: ./.github/actions/setup - name: Download nargo binary - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: nargo path: ./nargo @@ -337,19 +357,19 @@ jobs: uses: actions/checkout@v4 - name: Download nargo binary - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: nargo path: ./nargo - name: Download acvm_js package artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: acvm-js path: ./acvm-repo/acvm_js - name: Download noirc_abi package artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: noirc_abi_wasm path: ./tooling/noirc_abi_wasm @@ -374,8 +394,8 @@ jobs: - name: Run noir_codegen tests run: yarn workspace @noir-lang/noir_codegen test - test-integration: - name: Integration Tests + test-integration-node: + name: Integration Tests (Node) runs-on: ubuntu-latest needs: [build-acvm-js, build-noir-wasm, build-nargo, build-noirc-abi] timeout-minutes: 30 @@ -385,25 +405,25 @@ jobs: uses: actions/checkout@v4 - name: Download nargo binary - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: nargo path: ./nargo - name: Download acvm_js package artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: acvm-js path: ./acvm-repo/acvm_js - name: Download noir_wasm package artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: noir_wasm path: ./compiler/wasm - name: Download noirc_abi package artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: noirc_abi_wasm path: ./tooling/noirc_abi_wasm @@ -419,6 +439,48 @@ jobs: - name: Install Yarn dependencies uses: ./.github/actions/setup + - name: Setup `integration-tests` + run: | + # Note the lack of spaces between package names. 
+ PACKAGES_TO_BUILD="@noir-lang/types,@noir-lang/backend_barretenberg,@noir-lang/noir_js" + yarn workspaces foreach -vtp --from "{$PACKAGES_TO_BUILD}" run build + + - name: Run `integration-tests` + working-directory: ./compiler/integration-tests + run: | + yarn test:node + + test-integration-browser: + name: Integration Tests (Browser) + runs-on: ubuntu-latest + needs: [build-acvm-js, build-noir-wasm, build-nargo, build-noirc-abi] + timeout-minutes: 30 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Download acvm_js package artifact + uses: actions/download-artifact@v4 + with: + name: acvm-js + path: ./acvm-repo/acvm_js + + - name: Download noir_wasm package artifact + uses: actions/download-artifact@v4 + with: + name: noir_wasm + path: ./compiler/wasm + + - name: Download noirc_abi package artifact + uses: actions/download-artifact@v4 + with: + name: noirc_abi_wasm + path: ./tooling/noirc_abi_wasm + + - name: Install Yarn dependencies + uses: ./.github/actions/setup + - name: Install Playwright uses: ./.github/actions/install-playwright @@ -429,8 +491,9 @@ jobs: yarn workspaces foreach -vtp --from "{$PACKAGES_TO_BUILD}" run build - name: Run `integration-tests` + working-directory: ./compiler/integration-tests run: | - yarn test:integration + yarn test:browser # This is a job which depends on all test jobs and reports the overall status. # This allows us to add/remove test jobs without having to update the required workflows. @@ -447,7 +510,8 @@ jobs: - test-noir-js - test-noir-wasm - test-noir-codegen - - test-integration + - test-integration-node + - test-integration-browser steps: - name: Report overall success @@ -459,4 +523,4 @@ jobs: fi env: # We treat any skipped or failing jobs as a failure for the workflow as a whole. - FAIL: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'skipped') }} + FAIL: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }} diff --git a/.github/workflows/test-rust-workspace-msrv.yml b/.github/workflows/test-rust-workspace-msrv.yml new file mode 100644 index 00000000000..0b2855fa834 --- /dev/null +++ b/.github/workflows/test-rust-workspace-msrv.yml @@ -0,0 +1,125 @@ +name: Test (MSRV check) + +# TL;DR https://github.com/noir-lang/noir/issues/4384 +# +# This workflow acts to ensure that we can publish to crates.io, we need this extra check as libraries don't respect the Cargo.lock file committed in this repository. +# We must then always be able to build the workspace using the latest versions of all of our dependencies, so we explicitly update them and build in this workflow. + +on: + schedule: + # Run a nightly check at 2 AM UTC + - cron: "0 2 * * *" + push: + branches: + - master + +# This will cancel previous runs when a branch or PR is updated +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }} + cancel-in-progress: true + +jobs: + build-test-artifacts: + name: Build test artifacts + runs-on: ubuntu-latest + timeout-minutes: 30 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup toolchain + uses: dtolnay/rust-toolchain@1.73.0 + with: + targets: x86_64-unknown-linux-gnu + + # We force the ACVM crate and all of its dependencies to update their dependencies + # This ensures that we'll be able to build the crates when they're being published. 
+ - name: Update Cargo.lock + run: | + cargo update --package acvm --aggressive + cargo update --package bn254_blackbox_solver --aggressive + + - uses: Swatinem/rust-cache@v2 + with: + key: x86_64-unknown-linux-gnu-msrv-check + cache-on-failure: true + save-if: ${{ github.event_name != 'merge_group' }} + + - name: Install nextest + uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.67 + + - name: Build and archive tests + run: cargo nextest archive --workspace --release --archive-file nextest-archive.tar.zst + + - name: Upload archive to workflow + uses: actions/upload-artifact@v4 + with: + name: nextest-archive + path: nextest-archive.tar.zst + + run-tests: + name: "Run tests (partition ${{matrix.partition}})" + runs-on: ubuntu-latest + needs: [build-test-artifacts] + strategy: + fail-fast: false + matrix: + partition: [1, 2, 3, 4] + steps: + - uses: actions/checkout@v4 + + - name: Setup toolchain + uses: dtolnay/rust-toolchain@1.73.0 + with: + targets: x86_64-unknown-linux-gnu + + - name: Install nextest + uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.67 + + - name: Download archive + uses: actions/download-artifact@v4 + with: + name: nextest-archive + - name: Run tests + run: | + cargo nextest run --archive-file nextest-archive.tar.zst \ + --partition count:${{ matrix.partition }}/4 + + # This is a job which depends on all test jobs and reports the overall status. + # This allows us to add/remove test jobs without having to update the required workflows. + tests-end: + name: Rust End + runs-on: ubuntu-latest + # We want this job to always run (even if the dependant jobs fail) as we want this job to fail rather than skipping. + if: ${{ always() }} + needs: + - run-tests + + steps: + - name: Report overall success + run: | + if [[ $FAIL == true ]]; then + exit 1 + else + exit 0 + fi + env: + # We treat any cancelled, skipped or failing jobs as a failure for the workflow as a whole. 
+ FAIL: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }} + + # Raise an issue if the tests failed + - name: Alert on failed publish + uses: JasonEtco/create-an-issue@v2 + if: ${{ failure() }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + WORKFLOW_NAME: ${{ github.workflow }} + WORKFLOW_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + with: + update_existing: true + filename: .github/JS_PUBLISH_FAILED.md \ No newline at end of file diff --git a/.github/workflows/test-rust-workspace.yml b/.github/workflows/test-rust-workspace.yml index eccd7585fcf..22684de3044 100644 --- a/.github/workflows/test-rust-workspace.yml +++ b/.github/workflows/test-rust-workspace.yml @@ -13,33 +13,88 @@ concurrency: cancel-in-progress: true jobs: - test: - name: Test on ${{ matrix.os }} - runs-on: ${{ matrix.runner }} + build-test-artifacts: + name: Build test artifacts + runs-on: ubuntu-latest timeout-minutes: 30 - strategy: - fail-fast: false - matrix: - include: - - os: ubuntu - runner: ubuntu-latest - target: x86_64-unknown-linux-gnu - steps: - name: Checkout uses: actions/checkout@v4 - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.71.1 + uses: dtolnay/rust-toolchain@1.73.0 with: - targets: ${{ matrix.target }} + targets: x86_64-unknown-linux-gnu - uses: Swatinem/rust-cache@v2 with: - key: ${{ matrix.target }} + key: x86_64-unknown-linux-gnu cache-on-failure: true save-if: ${{ github.event_name != 'merge_group' }} + - name: Install nextest + uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.67 + + - name: Build and archive tests + run: cargo nextest archive --workspace --release --archive-file nextest-archive.tar.zst + + - name: Upload archive to workflow + uses: actions/upload-artifact@v4 + with: + name: nextest-archive + path: nextest-archive.tar.zst + + run-tests: + name: "Run tests (partition ${{matrix.partition}})" + runs-on: ubuntu-latest + needs: [build-test-artifacts] + strategy: + fail-fast: false + matrix: + partition: [1, 2, 3, 4] + steps: + - uses: actions/checkout@v4 + + - name: Setup toolchain + uses: dtolnay/rust-toolchain@1.73.0 + with: + targets: x86_64-unknown-linux-gnu + + - name: Install nextest + uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.67 + + - name: Download archive + uses: actions/download-artifact@v4 + with: + name: nextest-archive - name: Run tests - run: cargo test --workspace --locked --release + run: | + cargo nextest run --archive-file nextest-archive.tar.zst \ + --partition count:${{ matrix.partition }}/4 + + # This is a job which depends on all test jobs and reports the overall status. + # This allows us to add/remove test jobs without having to update the required workflows. + tests-end: + name: Rust End + runs-on: ubuntu-latest + # We want this job to always run (even if the dependant jobs fail) as we want this job to fail rather than skipping. + if: ${{ always() }} + needs: + - run-tests + + steps: + - name: Report overall success + run: | + if [[ $FAIL == true ]]; then + exit 1 + else + exit 0 + fi + env: + # We treat any cancelled, skipped or failing jobs as a failure for the workflow as a whole. 
+ FAIL: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }} diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f440a7a2c51..8916585d7f1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,4 +1,4 @@ { - ".": "0.23.0", - "acvm-repo": "0.39.0" + ".": "0.24.0", + "acvm-repo": "0.40.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index af7eb5b2f19..e9b2dfb48a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,96 @@ # Changelog +## [0.24.0](https://github.com/noir-lang/noir/compare/v0.23.0...v0.24.0) (2024-02-12) + + +### ⚠ BREAKING CHANGES + +* rename bigint_neg into bigint_sub (https://github.com/AztecProtocol/aztec-packages/pull/4420) +* Add expression width into acir (https://github.com/AztecProtocol/aztec-packages/pull/4014) +* init storage macro (https://github.com/AztecProtocol/aztec-packages/pull/4200) +* **acir:** Move `is_recursive` flag to be part of the circuit definition (https://github.com/AztecProtocol/aztec-packages/pull/4221) +* Sync commits from `aztec-packages` ([#4144](https://github.com/noir-lang/noir/issues/4144)) + +### Features + +* Add bit size to const opcode (https://github.com/AztecProtocol/aztec-packages/pull/4385) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Add brillig array index check ([#4127](https://github.com/noir-lang/noir/issues/4127)) ([c29f85f](https://github.com/noir-lang/noir/commit/c29f85fb5b1795e47282e4dbfbc1ceed2feb420c)) +* Add definitions for From and Into traits to Noir prelude ([#4169](https://github.com/noir-lang/noir/issues/4169)) ([4421ce4](https://github.com/noir-lang/noir/commit/4421ce4f8f91c7fcac34fbdb76e204df93a46df8)) +* Add expression width into acir (https://github.com/AztecProtocol/aztec-packages/pull/4014) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Add instrumentation for tracking variables in debugging ([#4122](https://github.com/noir-lang/noir/issues/4122)) ([c58d691](https://github.com/noir-lang/noir/commit/c58d69141b54a918cd1675400c00bfd48720f896)) +* Add option to print monomorphized program ([#4119](https://github.com/noir-lang/noir/issues/4119)) ([80f7e29](https://github.com/noir-lang/noir/commit/80f7e29340ceb88781dc80a13325468ace3b0cf3)) +* Add support for overriding expression width ([#4117](https://github.com/noir-lang/noir/issues/4117)) ([c8026d5](https://github.com/noir-lang/noir/commit/c8026d557d535b10fe455165d6445076df7a03de)) +* Add warnings for usage of restricted bit sizes ([#4234](https://github.com/noir-lang/noir/issues/4234)) ([0ffc38b](https://github.com/noir-lang/noir/commit/0ffc38bc8e91291c21cad3682ef77250e3c1e237)) +* Allow bitshifts to be represented in SSA for brillig ([#4301](https://github.com/noir-lang/noir/issues/4301)) ([d86ff1a](https://github.com/noir-lang/noir/commit/d86ff1a16eed0a3f2994176c9399dafaf5bde108)) +* Allow brillig to read arrays directly from memory (https://github.com/AztecProtocol/aztec-packages/pull/4460) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Allow globals to refer to any expression ([#4293](https://github.com/noir-lang/noir/issues/4293)) ([479330e](https://github.com/noir-lang/noir/commit/479330e9e767e0c06908a63c975341d9f83b5e7a)) +* Allow nested arrays and vectors in Brillig foreign calls (https://github.com/AztecProtocol/aztec-packages/pull/4478) 
([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Allow variables and stack trace inspection in the debugger ([#4184](https://github.com/noir-lang/noir/issues/4184)) ([bf263fc](https://github.com/noir-lang/noir/commit/bf263fc8d843940f328a90f6366edd2671fb2682)) +* **avm:** Back in avm context with macro - refactor context (https://github.com/AztecProtocol/aztec-packages/pull/4438) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* **aztec-nr:** Initial work for aztec public vm macro (https://github.com/AztecProtocol/aztec-packages/pull/4400) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Deallocate stack items at the instruction level ([#4339](https://github.com/noir-lang/noir/issues/4339)) ([8f024a8](https://github.com/noir-lang/noir/commit/8f024a86d615da5e10bb198e1a5fca6d565ef547)) +* Disable constraint bubbling pass ([#4131](https://github.com/noir-lang/noir/issues/4131)) ([9ba2de6](https://github.com/noir-lang/noir/commit/9ba2de6143cd678b8656a84fab890e836257a13d)) +* Disable unused variable checks on low-level and oracle functions ([#4179](https://github.com/noir-lang/noir/issues/4179)) ([8f70e57](https://github.com/noir-lang/noir/commit/8f70e57ded3b8a46388eedf1c0ec83772f88733e)) +* Evaluation of dynamic assert messages ([#4101](https://github.com/noir-lang/noir/issues/4101)) ([c284e01](https://github.com/noir-lang/noir/commit/c284e01bfe20ceae4414dc123624b5cbb8b66d09)) +* Improve Error Handling for Cargo in Bootstrap Script ([#4211](https://github.com/noir-lang/noir/issues/4211)) ([3a90849](https://github.com/noir-lang/noir/commit/3a908491d649be503df24038fc1eab875d77c8f1)) +* Init storage macro (https://github.com/AztecProtocol/aztec-packages/pull/4200) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* **lsp:** Goto type reference for Struct ([#4091](https://github.com/noir-lang/noir/issues/4091)) ([d56cac2](https://github.com/noir-lang/noir/commit/d56cac2af7dc1cce0795f8e9701bb17cc3e67e14)) +* Move bounded_vec into the noir stdlib ([#4197](https://github.com/noir-lang/noir/issues/4197)) ([c50621f](https://github.com/noir-lang/noir/commit/c50621f1acddfb9138d6a036fd78c7a6c08dd084)) +* Multiply first to allow more ACIR gen optimizations ([#4201](https://github.com/noir-lang/noir/issues/4201)) ([882639d](https://github.com/noir-lang/noir/commit/882639de109f0ecccf2a8522e2181a301145e19f)) +* Option expect method ([#4219](https://github.com/noir-lang/noir/issues/4219)) ([8e042f2](https://github.com/noir-lang/noir/commit/8e042f2cbcc8a698aa45241aedbb0131b4acdc46)) +* Perform constraints on uncasted values if they are the same type ([#4303](https://github.com/noir-lang/noir/issues/4303)) ([816fa85](https://github.com/noir-lang/noir/commit/816fa85d6fcb081d79d1a255b1503324ce53f71d)) +* Remove predicate from `sort` intrinsic function ([#4228](https://github.com/noir-lang/noir/issues/4228)) ([d646243](https://github.com/noir-lang/noir/commit/d646243b2e8deff64d4d7dfe379d9caeba81c3b5)) +* Remove replacement of boolean range opcodes with `AssertZero` opcodes ([#4107](https://github.com/noir-lang/noir/issues/4107)) ([dac0e87](https://github.com/noir-lang/noir/commit/dac0e87ee3be3446b92bbb12ef4832fd493fcee3)) +* Replace bitwise ANDs used for truncation with `Instruction::Truncate` ([#4327](https://github.com/noir-lang/noir/issues/4327)) 
([eb67ff6](https://github.com/noir-lang/noir/commit/eb67ff6ca8b15eb824ac7ee01ed8387bf50ce57b)) +* Replace modulo operations with truncations where possible ([#4329](https://github.com/noir-lang/noir/issues/4329)) ([70f2435](https://github.com/noir-lang/noir/commit/70f2435685d1d2a8fdd28160187d4312ec78d294)) +* Separate compilation and expression narrowing in `nargo` interface ([#4100](https://github.com/noir-lang/noir/issues/4100)) ([62a4e37](https://github.com/noir-lang/noir/commit/62a4e37ef2274af2839011c3bab7bfdbf9f164fa)) +* Simplify all unsigned constant NOT instructions ([#4230](https://github.com/noir-lang/noir/issues/4230)) ([fab4a6e](https://github.com/noir-lang/noir/commit/fab4a6e6ff025c83ec43313e36b8a236d030313a)) +* Sync commits from `aztec-packages` ([#4144](https://github.com/noir-lang/noir/issues/4144)) ([0205d3b](https://github.com/noir-lang/noir/commit/0205d3b4ad0cf5ffd775a43eb5af273a772cf138)) +* Use constraint information to perform constant folding ([#4060](https://github.com/noir-lang/noir/issues/4060)) ([9a4bf16](https://github.com/noir-lang/noir/commit/9a4bf16033c8d39c351eb532a4b015256cd22186)) + + +### Bug Fixes + +* Accurate tracking of slice capacities across blocks ([#4240](https://github.com/noir-lang/noir/issues/4240)) ([7420dbb](https://github.com/noir-lang/noir/commit/7420dbb7471bf243665d0bb3014886095c10c16f)) +* Allow function calls in global definitions ([#4320](https://github.com/noir-lang/noir/issues/4320)) ([0dc205c](https://github.com/noir-lang/noir/commit/0dc205cdf28fcd858bdff3e9dd5d21c7498f451c)) +* Allow performing bitwise NOT on unsigned integers ([#4229](https://github.com/noir-lang/noir/issues/4229)) ([b3ddf10](https://github.com/noir-lang/noir/commit/b3ddf10a2cbb80e88821baf7d76c478c3b98b3ea)) +* Apply generic arguments from trait constraints before instantiating identifiers ([#4121](https://github.com/noir-lang/noir/issues/4121)) ([eb6fc0f](https://github.com/noir-lang/noir/commit/eb6fc0f3658bf126ed38d7aec7ee3f44ee0533b5)) +* Apply range constraints to return values from unconstrained functions ([#4217](https://github.com/noir-lang/noir/issues/4217)) ([3af2a89](https://github.com/noir-lang/noir/commit/3af2a89826f7d9b6dcd1782b8b38417c64065293)) +* Apply trait constraints from method calls ([#4152](https://github.com/noir-lang/noir/issues/4152)) ([68c5486](https://github.com/noir-lang/noir/commit/68c5486fda5a32eef74dd5b83b51024c1b3ab40c)) +* Better errors for missing `fn` keyword ([#4154](https://github.com/noir-lang/noir/issues/4154)) ([057c208](https://github.com/noir-lang/noir/commit/057c2083a61bdad7dfcdc8c3f39769b41ae6926e)) +* Check for tests in all packages before failing due to an unsatisfied test filter ([#4114](https://github.com/noir-lang/noir/issues/4114)) ([1107373](https://github.com/noir-lang/noir/commit/1107373bbbb9a8ca088dd6ac43131392cb2f33e1)) +* Clean error when attemping to return a slice from Brillig to ACIR ([#4280](https://github.com/noir-lang/noir/issues/4280)) ([bcad4ec](https://github.com/noir-lang/noir/commit/bcad4ec5cc3e3f606e5bf673c7e367f1b63b20a2)) +* Correct result when assigning shared arrays in unconstrained code ([#4210](https://github.com/noir-lang/noir/issues/4210)) ([bdd8a96](https://github.com/noir-lang/noir/commit/bdd8a96fb8364edcab4db06804e4949bacf18bf4)) +* **docs:** Codegen docs before cutting a new version ([#4183](https://github.com/noir-lang/noir/issues/4183)) ([2914310](https://github.com/noir-lang/noir/commit/29143104fa907b446d534ac204069572cdc6f2f9)) +* Ensure that destination register is allocated when 
moving between registers in brillig gen ([#4316](https://github.com/noir-lang/noir/issues/4316)) ([ca0a56e](https://github.com/noir-lang/noir/commit/ca0a56ee6bd07af8a3af5317d487ac94847115fc)) +* Ensure that unconstrained entrypoint functions don't generate constraints ([#4292](https://github.com/noir-lang/noir/issues/4292)) ([fae4ead](https://github.com/noir-lang/noir/commit/fae4eadfedbf42ae73610c3475158072a183b329)) +* From field with constant values ([#4226](https://github.com/noir-lang/noir/issues/4226)) ([593916b](https://github.com/noir-lang/noir/commit/593916bb61fb926730a34519b1429a8d035e10b6)) +* **lsp:** Crash when file not in workspace ([#4146](https://github.com/noir-lang/noir/issues/4146)) ([cf7130f](https://github.com/noir-lang/noir/commit/cf7130f2e19e2d241e003c5527de9bf9d74cea40)) +* **lsp:** Replace panics with errors ([#4209](https://github.com/noir-lang/noir/issues/4209)) ([26e9618](https://github.com/noir-lang/noir/commit/26e961860709e9c0ab3d1eb561fd39b5bd95a0fb)) +* Maintain correct type when simplifying `x ^ x` ([#4082](https://github.com/noir-lang/noir/issues/4082)) ([9d83c2b](https://github.com/noir-lang/noir/commit/9d83c2b7d49490027bfa2974c1e2c5a85cc00aff)) +* Message formatting for assert statement ([#4323](https://github.com/noir-lang/noir/issues/4323)) ([3972ead](https://github.com/noir-lang/noir/commit/3972ead2593cd1d3f61c3311e948ec27bd9b1491)) +* Prevent debugger crashing on circuits with no opcodes ([#4283](https://github.com/noir-lang/noir/issues/4283)) ([2e32845](https://github.com/noir-lang/noir/commit/2e328454054a7c90b8b762b7c9ff0823eb0997c5)) +* Prevent declarations of blackbox functions outside of the stdlib ([#4177](https://github.com/noir-lang/noir/issues/4177)) ([9fb6b09](https://github.com/noir-lang/noir/commit/9fb6b092c504d29d7f190952387de66c7e6e570c)) +* Remove panic from `init_log_level` in `acvm_js` ([#4195](https://github.com/noir-lang/noir/issues/4195)) ([2e26530](https://github.com/noir-lang/noir/commit/2e26530bf53006c1ed4fee310bcaa905c95dd95b)) +* Respect order in bubble up for redundant asserts ([#4109](https://github.com/noir-lang/noir/issues/4109)) ([189aa48](https://github.com/noir-lang/noir/commit/189aa48c6c32fb6621b0e38a1f2d5d76d26ff0f2)) +* Revert "correct result when assigning shared arrays" and added regression test ([#4333](https://github.com/noir-lang/noir/issues/4333)) ([05e78b3](https://github.com/noir-lang/noir/commit/05e78b39e9465b37138bba1c9b374a74404925aa)) +* Save the data bus to the current function before generating others ([#4047](https://github.com/noir-lang/noir/issues/4047)) ([0a5bd4f](https://github.com/noir-lang/noir/commit/0a5bd4faa880dfcadf74372d8caeb458b2b55132)) +* Simplify constant assert messages into `ConstrainError::Static` ([#4287](https://github.com/noir-lang/noir/issues/4287)) ([fd15052](https://github.com/noir-lang/noir/commit/fd150521a480c04ff64f84e3c1a2faf1e8394516)) +* Ssa typing for array & slice indexes ([#4278](https://github.com/noir-lang/noir/issues/4278)) ([4074bab](https://github.com/noir-lang/noir/commit/4074babef6e25c0f723f2bc9b1b2c89302f8e0b9)) +* Ssa typing for assign_lvalue_index ([#4289](https://github.com/noir-lang/noir/issues/4289)) ([37f149c](https://github.com/noir-lang/noir/commit/37f149c68e195cf29f81bef4616739cda65f8da7)) +* SSA typing for right shifts ([#4302](https://github.com/noir-lang/noir/issues/4302)) ([41ee1aa](https://github.com/noir-lang/noir/commit/41ee1aa645e00b5e4926be24a1d8130bb1efad28)) +* Ssa typing of make_offset ([#4277](https://github.com/noir-lang/noir/issues/4277)) 
([e4378ee](https://github.com/noir-lang/noir/commit/e4378eed877f20ef4de7d5eaac4209c282f2860a)) +* Track graphs of item dependencies to find dependency cycles ([#4266](https://github.com/noir-lang/noir/issues/4266)) ([61eabf1](https://github.com/noir-lang/noir/commit/61eabf1aa4f3eeba4695dcd988cdd3828ec269a5)) +* Type check ACIR mutable reference passed to brillig ([#4281](https://github.com/noir-lang/noir/issues/4281)) ([7e139de](https://github.com/noir-lang/noir/commit/7e139de3499478cf573d2a7ad480f434cb898d9f)) +* Update array method type signatures in the docs ([#4178](https://github.com/noir-lang/noir/issues/4178)) ([7c0a955](https://github.com/noir-lang/noir/commit/7c0a955486e14628356bb269402f4287c5600df4)) +* Zero out input to `to_radix` calls if inactive ([#4116](https://github.com/noir-lang/noir/issues/4116)) ([3f5bad3](https://github.com/noir-lang/noir/commit/3f5bad3e60b8e2e72155e09f3951a73c3087a9c0)) + + +### Miscellaneous Chores + +* **acir:** Move `is_recursive` flag to be part of the circuit definition (https://github.com/AztecProtocol/aztec-packages/pull/4221) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Rename bigint_neg into bigint_sub (https://github.com/AztecProtocol/aztec-packages/pull/4420) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) + ## [0.23.0](https://github.com/noir-lang/noir/compare/v0.22.0...v0.23.0) (2024-01-22) diff --git a/Cargo.lock b/Cargo.lock index 93f1d25fc76..c0438eaf81f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,7 +4,7 @@ version = 3 [[package]] name = "acir" -version = "0.39.0" +version = "0.40.0" dependencies = [ "acir_field", "base64 0.21.2", @@ -23,7 +23,7 @@ dependencies = [ [[package]] name = "acir_field" -version = "0.39.0" +version = "0.40.0" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -37,7 +37,7 @@ dependencies = [ [[package]] name = "acvm" -version = "0.39.0" +version = "0.40.0" dependencies = [ "acir", "acvm_blackbox_solver", @@ -53,7 +53,7 @@ dependencies = [ [[package]] name = "acvm_blackbox_solver" -version = "0.39.0" +version = "0.40.0" dependencies = [ "acir", "blake2", @@ -68,7 +68,7 @@ dependencies = [ [[package]] name = "acvm_js" -version = "0.39.0" +version = "0.40.0" dependencies = [ "acvm", "bn254_blackbox_solver", @@ -212,10 +212,7 @@ checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "arena" -version = "0.23.0" -dependencies = [ - "generational-arena", -] +version = "0.24.0" [[package]] name = "ark-bls12-381" @@ -416,9 +413,11 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "aztec_macros" -version = "0.23.0" +version = "0.24.0" dependencies = [ + "convert_case 0.6.0", "iter-extended", + "noirc_errors", "noirc_frontend", ] @@ -588,9 +587,10 @@ dependencies = [ "ark-ff", "flate2", "getrandom 0.2.10", - "grumpkin", "js-sys", + "noir_grumpkin", "num-bigint", + "num-traits", "pkg-config", "reqwest", "rust-embed", @@ -602,7 +602,7 @@ dependencies = [ [[package]] name = "brillig" -version = "0.39.0" +version = "0.40.0" dependencies = [ "acir_field", "serde", @@ -610,7 +610,7 @@ dependencies = [ [[package]] name = "brillig_vm" -version = "0.39.0" +version = "0.40.0" dependencies = [ "acir", "acvm_blackbox_solver", @@ -825,6 +825,14 @@ dependencies = [ "clap_derive", ] +[[package]] +name = "clap-markdown" +version = "0.1.3" +source = 
"git+https://github.com/noir-lang/clap-markdown?rev=450d759532c88f0dba70891ceecdbc9ff8f25d2b#450d759532c88f0dba70891ceecdbc9ff8f25d2b" +dependencies = [ + "clap", +] + [[package]] name = "clap_builder" version = "4.4.7" @@ -1003,6 +1011,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation-sys" version = "0.8.4" @@ -1368,7 +1385,7 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "convert_case", + "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version", @@ -1662,6 +1679,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + [[package]] name = "flate2" version = "1.0.28" @@ -1683,7 +1706,7 @@ dependencies = [ [[package]] name = "fm" -version = "0.23.0" +version = "0.24.0" dependencies = [ "codespan-reporting", "iter-extended", @@ -1818,15 +1841,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "generational-arena" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877e94aff08e743b651baaea359664321055749b398adff8740a7399af7796e7" -dependencies = [ - "cfg-if 1.0.0", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -1932,17 +1946,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "grumpkin" -version = "0.1.0" -source = "git+https://github.com/noir-lang/grumpkin?rev=56d99799381f79e42148aaef0de2b0cf9a4b9a5d#56d99799381f79e42148aaef0de2b0cf9a4b9a5d" -dependencies = [ - "ark-bn254", - "ark-ec", - "ark-ff", - "ark-std", -] - [[package]] name = "h2" version = "0.3.24" @@ -2295,7 +2298,7 @@ dependencies = [ [[package]] name = "iter-extended" -version = "0.23.0" +version = "0.24.0" [[package]] name = "itertools" @@ -2648,7 +2651,7 @@ checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" [[package]] name = "nargo" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "codespan-reporting", @@ -2668,7 +2671,6 @@ dependencies = [ "rayon", "rustc_version", "serde", - "serial_test", "tempfile", "thiserror", "tracing", @@ -2676,7 +2678,7 @@ dependencies = [ [[package]] name = "nargo_cli" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "assert_cmd", @@ -2686,6 +2688,7 @@ dependencies = [ "bn254_blackbox_solver", "build-data", "clap", + "clap-markdown", "color-eyre", "const_format", "criterion", @@ -2727,7 +2730,7 @@ dependencies = [ [[package]] name = "nargo_fmt" -version = "0.23.0" +version = "0.24.0" dependencies = [ "bytecount", "noirc_frontend", @@ -2739,7 +2742,7 @@ dependencies = [ [[package]] name = "nargo_toml" -version = "0.23.0" +version = "0.24.0" dependencies = [ "dirs", "fm", @@ -2812,7 +2815,7 @@ dependencies = [ [[package]] name = "noir_debugger" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "assert_cmd", @@ -2824,18 +2827,30 @@ dependencies = [ "nargo", "noirc_driver", "noirc_errors", + "noirc_frontend", "noirc_printable_type", "owo-colors", 
"rexpect", "serde_json", "tempfile", - "test-binary", "thiserror", ] +[[package]] +name = "noir_grumpkin" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e7d49a4b14b13c0dc730b05780b385828ab88f4148daaad7db080ecdce07350" +dependencies = [ + "ark-bn254", + "ark-ec", + "ark-ff", + "ark-std", +] + [[package]] name = "noir_lsp" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "async-lsp", @@ -2861,7 +2876,7 @@ dependencies = [ [[package]] name = "noir_wasm" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "build-data", @@ -2884,7 +2899,7 @@ dependencies = [ [[package]] name = "noirc_abi" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "iter-extended", @@ -2901,7 +2916,7 @@ dependencies = [ [[package]] name = "noirc_abi_wasm" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "build-data", @@ -2918,7 +2933,7 @@ dependencies = [ [[package]] name = "noirc_driver" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "aztec_macros", @@ -2931,14 +2946,16 @@ dependencies = [ "noirc_errors", "noirc_evaluator", "noirc_frontend", + "noirc_macros", "rust-embed", "serde", + "thiserror", "tracing", ] [[package]] name = "noirc_errors" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "base64 0.21.2", @@ -2947,6 +2964,7 @@ dependencies = [ "codespan-reporting", "flate2", "fm", + "noirc_printable_type", "serde", "serde_json", "serde_with", @@ -2955,7 +2973,7 @@ dependencies = [ [[package]] name = "noirc_evaluator" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "fxhash", @@ -2971,7 +2989,7 @@ dependencies = [ [[package]] name = "noirc_frontend" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "arena", @@ -2980,6 +2998,7 @@ dependencies = [ "iter-extended", "noirc_errors", "noirc_printable_type", + "petgraph", "regex", "rustc-hash", "serde", @@ -2993,9 +3012,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "noirc_macros" +version = "0.24.0" +dependencies = [ + "iter-extended", + "noirc_frontend", +] + [[package]] name = "noirc_printable_type" -version = "0.23.0" +version = "0.24.0" dependencies = [ "acvm", "iter-extended", @@ -3183,6 +3210,16 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +[[package]] +name = "petgraph" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +dependencies = [ + "fixedbitset", + "indexmap 2.0.0", +] + [[package]] name = "phf" version = "0.10.1" @@ -4232,31 +4269,6 @@ dependencies = [ "syn 2.0.32", ] -[[package]] -name = "serial_test" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" -dependencies = [ - "dashmap", - "futures 0.3.28", - "lazy_static", - "log", - "parking_lot 0.12.1", - "serial_test_derive", -] - -[[package]] -name = "serial_test_derive" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.32", -] - [[package]] name = "sha2" version = "0.10.7" diff --git a/Cargo.toml b/Cargo.toml index 5dfff3dbb5d..7d5da7b00d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,10 @@ [workspace] members = [ 
+ # Macros crates for metaprogramming "aztec_macros", + "noirc_macros", + # Compiler crates "compiler/noirc_evaluator", "compiler/noirc_frontend", "compiler/noirc_errors", @@ -38,23 +41,23 @@ resolver = "2" [workspace.package] # x-release-please-start-version -version = "0.23.0" +version = "0.24.0" # x-release-please-end authors = ["The Noir Team "] edition = "2021" -rust-version = "1.71.1" +rust-version = "1.73.0" license = "MIT OR Apache-2.0" repository = "https://github.com/noir-lang/noir/" [workspace.dependencies] # ACVM workspace dependencies -acir_field = { version = "0.39.0", path = "acvm-repo/acir_field", default-features = false } -acir = { version = "0.39.0", path = "acvm-repo/acir", default-features = false } -acvm = { version = "0.39.0", path = "acvm-repo/acvm" } -brillig = { version = "0.39.0", path = "acvm-repo/brillig", default-features = false } -brillig_vm = { version = "0.39.0", path = "acvm-repo/brillig_vm", default-features = false } -acvm_blackbox_solver = { version = "0.39.0", path = "acvm-repo/blackbox_solver", default-features = false } +acir_field = { version = "0.40.0", path = "acvm-repo/acir_field", default-features = false } +acir = { version = "0.40.0", path = "acvm-repo/acir", default-features = false } +acvm = { version = "0.40.0", path = "acvm-repo/acvm" } +brillig = { version = "0.40.0", path = "acvm-repo/brillig", default-features = false } +brillig_vm = { version = "0.40.0", path = "acvm-repo/brillig_vm", default-features = false } +acvm_blackbox_solver = { version = "0.40.0", path = "acvm-repo/blackbox_solver", default-features = false } bn254_blackbox_solver = { version = "0.39.0", path = "acvm-repo/bn254_blackbox_solver", default-features = false } # Noir compiler workspace dependencies @@ -66,12 +69,10 @@ noirc_errors = { path = "compiler/noirc_errors" } noirc_evaluator = { path = "compiler/noirc_evaluator" } noirc_frontend = { path = "compiler/noirc_frontend" } noirc_printable_type = { path = "compiler/noirc_printable_type" } -noir_wasm = { path = "compiler/wasm" } # Noir tooling workspace dependencies nargo = { path = "tooling/nargo" } nargo_fmt = { path = "tooling/nargo_fmt" } -nargo_cli = { path = "tooling/nargo_cli" } nargo_toml = { path = "tooling/nargo_toml" } noir_lsp = { path = "tooling/lsp" } noir_debugger = { path = "tooling/debugger" } @@ -94,8 +95,6 @@ getrandom = "0.2" # Debugger dap = "0.4.1-alpha1" - -cfg-if = "1.0.0" clap = { version = "4.3.19", features = ["derive", "env"] } codespan = { version = "0.11.1", features = ["serialization"] } codespan-lsp = "0.11.1" diff --git a/Dockerfile b/Dockerfile index 000292e0a47..3a478c3f95a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,12 +1,12 @@ -FROM rust:bookworm +FROM rust:bullseye WORKDIR /usr/src/noir COPY . . RUN ./scripts/bootstrap_native.sh # When running the container, mount the users home directory to same location. -FROM ubuntu:lunar +FROM ubuntu:focal # Install Tini as nargo doesn't handle signals properly. # Install git as nargo needs it to clone. 
RUN apt-get update && apt-get install -y git tini && rm -rf /var/lib/apt/lists/* && apt-get clean COPY --from=0 /usr/src/noir/target/release/nargo /usr/src/noir/target/release/nargo -ENTRYPOINT ["/usr/bin/tini", "--", "/usr/src/noir/target/release/nargo"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/tini", "--", "/usr/src/noir/target/release/nargo"] diff --git a/Dockerfile.ci b/Dockerfile.ci index a73ce4ab969..e0dc030980c 100644 --- a/Dockerfile.ci +++ b/Dockerfile.ci @@ -1,4 +1,4 @@ -FROM rust:1.71.1-slim-bookworm as base +FROM rust:1.73.0-slim-bookworm as base RUN apt-get update && apt-get upgrade -y && apt-get install build-essential git -y WORKDIR /usr/src/noir ENV PATH="${PATH}:/usr/src/noir/target/release" diff --git a/README.md b/README.md index 771c3f1c74d..5c93512ae26 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ Concretely the following items are on the road map: ## Minimum Rust version -This crate's minimum supported rustc version is 1.71.1. +This crate's minimum supported rustc version is 1.73.0. ## Working on this project diff --git a/acvm-repo/CHANGELOG.md b/acvm-repo/CHANGELOG.md index 7f68244a7eb..acb465e5cc9 100644 --- a/acvm-repo/CHANGELOG.md +++ b/acvm-repo/CHANGELOG.md @@ -5,6 +5,59 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.40.0](https://github.com/noir-lang/noir/compare/v0.39.0...v0.40.0) (2024-02-12) + + +### ⚠ BREAKING CHANGES + +* rename bigint_neg into bigint_sub (https://github.com/AztecProtocol/aztec-packages/pull/4420) +* Add expression width into acir (https://github.com/AztecProtocol/aztec-packages/pull/4014) +* init storage macro (https://github.com/AztecProtocol/aztec-packages/pull/4200) +* **acir:** Move `is_recursive` flag to be part of the circuit definition (https://github.com/AztecProtocol/aztec-packages/pull/4221) +* Sync commits from `aztec-packages` ([#4144](https://github.com/noir-lang/noir/issues/4144)) +* Breaking changes from aztec-packages ([#3955](https://github.com/noir-lang/noir/issues/3955)) +* Rename Arithmetic opcode to AssertZero ([#3840](https://github.com/noir-lang/noir/issues/3840)) +* Remove unused methods on ACIR opcodes ([#3841](https://github.com/noir-lang/noir/issues/3841)) +* Remove partial backend feature ([#3805](https://github.com/noir-lang/noir/issues/3805)) + +### Features + +* Add bit size to const opcode (https://github.com/AztecProtocol/aztec-packages/pull/4385) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Add expression width into acir (https://github.com/AztecProtocol/aztec-packages/pull/4014) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Add instrumentation for tracking variables in debugging ([#4122](https://github.com/noir-lang/noir/issues/4122)) ([c58d691](https://github.com/noir-lang/noir/commit/c58d69141b54a918cd1675400c00bfd48720f896)) +* Add support for overriding expression width ([#4117](https://github.com/noir-lang/noir/issues/4117)) ([c8026d5](https://github.com/noir-lang/noir/commit/c8026d557d535b10fe455165d6445076df7a03de)) +* Allow brillig to read arrays directly from memory (https://github.com/AztecProtocol/aztec-packages/pull/4460) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Allow nested arrays and vectors in Brillig foreign 
calls (https://github.com/AztecProtocol/aztec-packages/pull/4478) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Allow variables and stack trace inspection in the debugger ([#4184](https://github.com/noir-lang/noir/issues/4184)) ([bf263fc](https://github.com/noir-lang/noir/commit/bf263fc8d843940f328a90f6366edd2671fb2682)) +* **avm:** Back in avm context with macro - refactor context (https://github.com/AztecProtocol/aztec-packages/pull/4438) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* **aztec-nr:** Initial work for aztec public vm macro (https://github.com/AztecProtocol/aztec-packages/pull/4400) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Aztec-packages ([#3754](https://github.com/noir-lang/noir/issues/3754)) ([c043265](https://github.com/noir-lang/noir/commit/c043265e550b59bd4296504826fe15d3ce3e9ad2)) +* Breaking changes from aztec-packages ([#3955](https://github.com/noir-lang/noir/issues/3955)) ([5be049e](https://github.com/noir-lang/noir/commit/5be049eee6c342649462282ee04f6411e6ea392c)) +* Evaluation of dynamic assert messages ([#4101](https://github.com/noir-lang/noir/issues/4101)) ([c284e01](https://github.com/noir-lang/noir/commit/c284e01bfe20ceae4414dc123624b5cbb8b66d09)) +* Init storage macro (https://github.com/AztecProtocol/aztec-packages/pull/4200) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Remove range constraints from witnesses which are constrained to be constants ([#3928](https://github.com/noir-lang/noir/issues/3928)) ([afe9c7a](https://github.com/noir-lang/noir/commit/afe9c7a38bb9d4245205d3aa46d4ce23d70a5671)) +* Remove replacement of boolean range opcodes with `AssertZero` opcodes ([#4107](https://github.com/noir-lang/noir/issues/4107)) ([dac0e87](https://github.com/noir-lang/noir/commit/dac0e87ee3be3446b92bbb12ef4832fd493fcee3)) +* Speed up transformation of debug messages ([#3815](https://github.com/noir-lang/noir/issues/3815)) ([2a8af1e](https://github.com/noir-lang/noir/commit/2a8af1e4141ffff61547ee1c2837a6392bd5db48)) +* Sync `aztec-packages` ([#4011](https://github.com/noir-lang/noir/issues/4011)) ([fee2452](https://github.com/noir-lang/noir/commit/fee24523c427c27f0bdaf98ea09a852a2da3e94c)) +* Sync commits from `aztec-packages` ([#4068](https://github.com/noir-lang/noir/issues/4068)) ([7a8f3a3](https://github.com/noir-lang/noir/commit/7a8f3a33b57875e681e3d81e667e3570a1cdbdcc)) +* Sync commits from `aztec-packages` ([#4144](https://github.com/noir-lang/noir/issues/4144)) ([0205d3b](https://github.com/noir-lang/noir/commit/0205d3b4ad0cf5ffd775a43eb5af273a772cf138)) + + +### Bug Fixes + +* Deserialize odd length hex literals ([#3747](https://github.com/noir-lang/noir/issues/3747)) ([4000fb2](https://github.com/noir-lang/noir/commit/4000fb279221eb07187d657bfaa7f1c7b311abf2)) +* Remove panic from `init_log_level` in `acvm_js` ([#4195](https://github.com/noir-lang/noir/issues/4195)) ([2e26530](https://github.com/noir-lang/noir/commit/2e26530bf53006c1ed4fee310bcaa905c95dd95b)) +* Return error instead of panicking on invalid circuit ([#3976](https://github.com/noir-lang/noir/issues/3976)) ([67201bf](https://github.com/noir-lang/noir/commit/67201bfc21a9c8858aa86be9cd47d463fb78d925)) + + +### Miscellaneous Chores + +* **acir:** Move `is_recursive` flag to be part of the circuit definition (https://github.com/AztecProtocol/aztec-packages/pull/4221)
([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Remove partial backend feature ([#3805](https://github.com/noir-lang/noir/issues/3805)) ([0383100](https://github.com/noir-lang/noir/commit/0383100853a80a5b28b797cdfeae0d271f1b7805)) +* Remove unused methods on ACIR opcodes ([#3841](https://github.com/noir-lang/noir/issues/3841)) ([9e5d0e8](https://github.com/noir-lang/noir/commit/9e5d0e813d61a0bfb5ee68174ed287c5a20f1579)) +* Rename Arithmetic opcode to AssertZero ([#3840](https://github.com/noir-lang/noir/issues/3840)) ([836f171](https://github.com/noir-lang/noir/commit/836f17145c2901060706294461c2d282dd121b3e)) +* Rename bigint_neg into bigint_sub (https://github.com/AztecProtocol/aztec-packages/pull/4420) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) + ## [0.39.0](https://github.com/noir-lang/noir/compare/v0.38.0...v0.39.0) (2024-01-22) diff --git a/acvm-repo/acir/Cargo.toml b/acvm-repo/acir/Cargo.toml index 49b10c57cc8..7021333486f 100644 --- a/acvm-repo/acir/Cargo.toml +++ b/acvm-repo/acir/Cargo.toml @@ -2,7 +2,7 @@ name = "acir" description = "ACIR is the IR that the VM processes, it is analogous to LLVM IR" # x-release-please-start-version -version = "0.39.0" +version = "0.40.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/acir/codegen/acir.cpp b/acvm-repo/acir/codegen/acir.cpp index 0f94e91ab10..0fc84d47a0f 100644 --- a/acvm-repo/acir/codegen/acir.cpp +++ b/acvm-repo/acir/codegen/acir.cpp @@ -5,221 +5,307 @@ namespace Circuit { - struct Witness { - uint32_t value; - - friend bool operator==(const Witness&, const Witness&); - std::vector bincodeSerialize() const; - static Witness bincodeDeserialize(std::vector); - }; - - struct FunctionInput { - Circuit::Witness witness; - uint32_t num_bits; - - friend bool operator==(const FunctionInput&, const FunctionInput&); - std::vector bincodeSerialize() const; - static FunctionInput bincodeDeserialize(std::vector); - }; - - struct BlackBoxFuncCall { - - struct AND { - Circuit::FunctionInput lhs; - Circuit::FunctionInput rhs; - Circuit::Witness output; + struct BinaryFieldOp { - friend bool operator==(const AND&, const AND&); + struct Add { + friend bool operator==(const Add&, const Add&); std::vector bincodeSerialize() const; - static AND bincodeDeserialize(std::vector); + static Add bincodeDeserialize(std::vector); }; - struct XOR { - Circuit::FunctionInput lhs; - Circuit::FunctionInput rhs; - Circuit::Witness output; + struct Sub { + friend bool operator==(const Sub&, const Sub&); + std::vector bincodeSerialize() const; + static Sub bincodeDeserialize(std::vector); + }; - friend bool operator==(const XOR&, const XOR&); + struct Mul { + friend bool operator==(const Mul&, const Mul&); std::vector bincodeSerialize() const; - static XOR bincodeDeserialize(std::vector); + static Mul bincodeDeserialize(std::vector); }; - struct RANGE { - Circuit::FunctionInput input; + struct Div { + friend bool operator==(const Div&, const Div&); + std::vector bincodeSerialize() const; + static Div bincodeDeserialize(std::vector); + }; - friend bool operator==(const RANGE&, const RANGE&); + struct Equals { + friend bool operator==(const Equals&, const Equals&); std::vector bincodeSerialize() const; - static RANGE bincodeDeserialize(std::vector); + static Equals bincodeDeserialize(std::vector); }; - struct SHA256 { - std::vector inputs; - std::vector outputs; + std::variant value; - friend bool operator==(const 
SHA256&, const SHA256&); + friend bool operator==(const BinaryFieldOp&, const BinaryFieldOp&); + std::vector bincodeSerialize() const; + static BinaryFieldOp bincodeDeserialize(std::vector); + }; + + struct BinaryIntOp { + + struct Add { + friend bool operator==(const Add&, const Add&); std::vector bincodeSerialize() const; - static SHA256 bincodeDeserialize(std::vector); + static Add bincodeDeserialize(std::vector); }; - struct Blake2s { - std::vector inputs; - std::vector outputs; + struct Sub { + friend bool operator==(const Sub&, const Sub&); + std::vector bincodeSerialize() const; + static Sub bincodeDeserialize(std::vector); + }; - friend bool operator==(const Blake2s&, const Blake2s&); + struct Mul { + friend bool operator==(const Mul&, const Mul&); std::vector bincodeSerialize() const; - static Blake2s bincodeDeserialize(std::vector); + static Mul bincodeDeserialize(std::vector); }; - struct Blake3 { - std::vector inputs; - std::vector outputs; + struct SignedDiv { + friend bool operator==(const SignedDiv&, const SignedDiv&); + std::vector bincodeSerialize() const; + static SignedDiv bincodeDeserialize(std::vector); + }; - friend bool operator==(const Blake3&, const Blake3&); + struct UnsignedDiv { + friend bool operator==(const UnsignedDiv&, const UnsignedDiv&); std::vector bincodeSerialize() const; - static Blake3 bincodeDeserialize(std::vector); + static UnsignedDiv bincodeDeserialize(std::vector); }; - struct SchnorrVerify { - Circuit::FunctionInput public_key_x; - Circuit::FunctionInput public_key_y; - std::vector signature; - std::vector message; - Circuit::Witness output; + struct Equals { + friend bool operator==(const Equals&, const Equals&); + std::vector bincodeSerialize() const; + static Equals bincodeDeserialize(std::vector); + }; - friend bool operator==(const SchnorrVerify&, const SchnorrVerify&); + struct LessThan { + friend bool operator==(const LessThan&, const LessThan&); std::vector bincodeSerialize() const; - static SchnorrVerify bincodeDeserialize(std::vector); + static LessThan bincodeDeserialize(std::vector); }; - struct PedersenCommitment { - std::vector inputs; - uint32_t domain_separator; - std::array outputs; + struct LessThanEquals { + friend bool operator==(const LessThanEquals&, const LessThanEquals&); + std::vector bincodeSerialize() const; + static LessThanEquals bincodeDeserialize(std::vector); + }; - friend bool operator==(const PedersenCommitment&, const PedersenCommitment&); + struct And { + friend bool operator==(const And&, const And&); std::vector bincodeSerialize() const; - static PedersenCommitment bincodeDeserialize(std::vector); + static And bincodeDeserialize(std::vector); }; - struct PedersenHash { - std::vector inputs; - uint32_t domain_separator; - Circuit::Witness output; + struct Or { + friend bool operator==(const Or&, const Or&); + std::vector bincodeSerialize() const; + static Or bincodeDeserialize(std::vector); + }; - friend bool operator==(const PedersenHash&, const PedersenHash&); + struct Xor { + friend bool operator==(const Xor&, const Xor&); std::vector bincodeSerialize() const; - static PedersenHash bincodeDeserialize(std::vector); + static Xor bincodeDeserialize(std::vector); }; - struct EcdsaSecp256k1 { - std::vector public_key_x; - std::vector public_key_y; - std::vector signature; - std::vector hashed_message; - Circuit::Witness output; + struct Shl { + friend bool operator==(const Shl&, const Shl&); + std::vector bincodeSerialize() const; + static Shl bincodeDeserialize(std::vector); + }; - friend bool 
operator==(const EcdsaSecp256k1&, const EcdsaSecp256k1&); + struct Shr { + friend bool operator==(const Shr&, const Shr&); std::vector bincodeSerialize() const; - static EcdsaSecp256k1 bincodeDeserialize(std::vector); + static Shr bincodeDeserialize(std::vector); }; - struct EcdsaSecp256r1 { - std::vector public_key_x; - std::vector public_key_y; - std::vector signature; - std::vector hashed_message; - Circuit::Witness output; + std::variant value; - friend bool operator==(const EcdsaSecp256r1&, const EcdsaSecp256r1&); + friend bool operator==(const BinaryIntOp&, const BinaryIntOp&); + std::vector bincodeSerialize() const; + static BinaryIntOp bincodeDeserialize(std::vector); + }; + + struct MemoryAddress { + uint64_t value; + + friend bool operator==(const MemoryAddress&, const MemoryAddress&); + std::vector bincodeSerialize() const; + static MemoryAddress bincodeDeserialize(std::vector); + }; + + struct HeapArray { + Circuit::MemoryAddress pointer; + uint64_t size; + + friend bool operator==(const HeapArray&, const HeapArray&); + std::vector bincodeSerialize() const; + static HeapArray bincodeDeserialize(std::vector); + }; + + struct HeapVector { + Circuit::MemoryAddress pointer; + Circuit::MemoryAddress size; + + friend bool operator==(const HeapVector&, const HeapVector&); + std::vector bincodeSerialize() const; + static HeapVector bincodeDeserialize(std::vector); + }; + + struct BlackBoxOp { + + struct Sha256 { + Circuit::HeapVector message; + Circuit::HeapArray output; + + friend bool operator==(const Sha256&, const Sha256&); std::vector bincodeSerialize() const; - static EcdsaSecp256r1 bincodeDeserialize(std::vector); + static Sha256 bincodeDeserialize(std::vector); }; - struct FixedBaseScalarMul { - Circuit::FunctionInput low; - Circuit::FunctionInput high; - std::array outputs; + struct Blake2s { + Circuit::HeapVector message; + Circuit::HeapArray output; - friend bool operator==(const FixedBaseScalarMul&, const FixedBaseScalarMul&); + friend bool operator==(const Blake2s&, const Blake2s&); std::vector bincodeSerialize() const; - static FixedBaseScalarMul bincodeDeserialize(std::vector); + static Blake2s bincodeDeserialize(std::vector); }; - struct EmbeddedCurveAdd { - Circuit::FunctionInput input1_x; - Circuit::FunctionInput input1_y; - Circuit::FunctionInput input2_x; - Circuit::FunctionInput input2_y; - std::array outputs; + struct Blake3 { + Circuit::HeapVector message; + Circuit::HeapArray output; - friend bool operator==(const EmbeddedCurveAdd&, const EmbeddedCurveAdd&); + friend bool operator==(const Blake3&, const Blake3&); std::vector bincodeSerialize() const; - static EmbeddedCurveAdd bincodeDeserialize(std::vector); + static Blake3 bincodeDeserialize(std::vector); }; struct Keccak256 { - std::vector inputs; - std::vector outputs; + Circuit::HeapVector message; + Circuit::HeapArray output; friend bool operator==(const Keccak256&, const Keccak256&); std::vector bincodeSerialize() const; static Keccak256 bincodeDeserialize(std::vector); }; - struct Keccak256VariableLength { - std::vector inputs; - Circuit::FunctionInput var_message_size; - std::vector outputs; - - friend bool operator==(const Keccak256VariableLength&, const Keccak256VariableLength&); - std::vector bincodeSerialize() const; - static Keccak256VariableLength bincodeDeserialize(std::vector); - }; - struct Keccakf1600 { - std::vector inputs; - std::vector outputs; + Circuit::HeapVector message; + Circuit::HeapArray output; friend bool operator==(const Keccakf1600&, const Keccakf1600&); std::vector 
bincodeSerialize() const; static Keccakf1600 bincodeDeserialize(std::vector); }; - struct RecursiveAggregation { - std::vector verification_key; - std::vector proof; - std::vector public_inputs; - Circuit::FunctionInput key_hash; + struct EcdsaSecp256k1 { + Circuit::HeapVector hashed_msg; + Circuit::HeapArray public_key_x; + Circuit::HeapArray public_key_y; + Circuit::HeapArray signature; + Circuit::MemoryAddress result; - friend bool operator==(const RecursiveAggregation&, const RecursiveAggregation&); + friend bool operator==(const EcdsaSecp256k1&, const EcdsaSecp256k1&); std::vector bincodeSerialize() const; - static RecursiveAggregation bincodeDeserialize(std::vector); + static EcdsaSecp256k1 bincodeDeserialize(std::vector); }; - struct BigIntAdd { - uint32_t lhs; - uint32_t rhs; - uint32_t output; + struct EcdsaSecp256r1 { + Circuit::HeapVector hashed_msg; + Circuit::HeapArray public_key_x; + Circuit::HeapArray public_key_y; + Circuit::HeapArray signature; + Circuit::MemoryAddress result; + + friend bool operator==(const EcdsaSecp256r1&, const EcdsaSecp256r1&); + std::vector bincodeSerialize() const; + static EcdsaSecp256r1 bincodeDeserialize(std::vector); + }; + + struct SchnorrVerify { + Circuit::MemoryAddress public_key_x; + Circuit::MemoryAddress public_key_y; + Circuit::HeapVector message; + Circuit::HeapVector signature; + Circuit::MemoryAddress result; + + friend bool operator==(const SchnorrVerify&, const SchnorrVerify&); + std::vector bincodeSerialize() const; + static SchnorrVerify bincodeDeserialize(std::vector); + }; + + struct PedersenCommitment { + Circuit::HeapVector inputs; + Circuit::MemoryAddress domain_separator; + Circuit::HeapArray output; + + friend bool operator==(const PedersenCommitment&, const PedersenCommitment&); + std::vector bincodeSerialize() const; + static PedersenCommitment bincodeDeserialize(std::vector); + }; + + struct PedersenHash { + Circuit::HeapVector inputs; + Circuit::MemoryAddress domain_separator; + Circuit::MemoryAddress output; + + friend bool operator==(const PedersenHash&, const PedersenHash&); + std::vector bincodeSerialize() const; + static PedersenHash bincodeDeserialize(std::vector); + }; + + struct FixedBaseScalarMul { + Circuit::MemoryAddress low; + Circuit::MemoryAddress high; + Circuit::HeapArray result; + + friend bool operator==(const FixedBaseScalarMul&, const FixedBaseScalarMul&); + std::vector bincodeSerialize() const; + static FixedBaseScalarMul bincodeDeserialize(std::vector); + }; + + struct EmbeddedCurveAdd { + Circuit::MemoryAddress input1_x; + Circuit::MemoryAddress input1_y; + Circuit::MemoryAddress input2_x; + Circuit::MemoryAddress input2_y; + Circuit::HeapArray result; + + friend bool operator==(const EmbeddedCurveAdd&, const EmbeddedCurveAdd&); + std::vector bincodeSerialize() const; + static EmbeddedCurveAdd bincodeDeserialize(std::vector); + }; + + struct BigIntAdd { + Circuit::MemoryAddress lhs; + Circuit::MemoryAddress rhs; + Circuit::MemoryAddress output; friend bool operator==(const BigIntAdd&, const BigIntAdd&); std::vector bincodeSerialize() const; static BigIntAdd bincodeDeserialize(std::vector); }; - struct BigIntNeg { - uint32_t lhs; - uint32_t rhs; - uint32_t output; + struct BigIntSub { + Circuit::MemoryAddress lhs; + Circuit::MemoryAddress rhs; + Circuit::MemoryAddress output; - friend bool operator==(const BigIntNeg&, const BigIntNeg&); + friend bool operator==(const BigIntSub&, const BigIntSub&); std::vector bincodeSerialize() const; - static BigIntNeg bincodeDeserialize(std::vector); + static 
BigIntSub bincodeDeserialize(std::vector); }; struct BigIntMul { - uint32_t lhs; - uint32_t rhs; - uint32_t output; + Circuit::MemoryAddress lhs; + Circuit::MemoryAddress rhs; + Circuit::MemoryAddress output; friend bool operator==(const BigIntMul&, const BigIntMul&); std::vector bincodeSerialize() const; @@ -227,9 +313,9 @@ namespace Circuit { }; struct BigIntDiv { - uint32_t lhs; - uint32_t rhs; - uint32_t output; + Circuit::MemoryAddress lhs; + Circuit::MemoryAddress rhs; + Circuit::MemoryAddress output; friend bool operator==(const BigIntDiv&, const BigIntDiv&); std::vector bincodeSerialize() const; @@ -237,9 +323,9 @@ namespace Circuit { }; struct BigIntFromLeBytes { - std::vector inputs; - std::vector modulus; - uint32_t output; + Circuit::HeapVector inputs; + Circuit::HeapVector modulus; + Circuit::MemoryAddress output; friend bool operator==(const BigIntFromLeBytes&, const BigIntFromLeBytes&); std::vector bincodeSerialize() const; @@ -247,8 +333,8 @@ namespace Circuit { }; struct BigIntToLeBytes { - uint32_t input; - std::vector outputs; + Circuit::MemoryAddress input; + Circuit::HeapVector output; friend bool operator==(const BigIntToLeBytes&, const BigIntToLeBytes&); std::vector bincodeSerialize() const; @@ -256,9 +342,9 @@ namespace Circuit { }; struct Poseidon2Permutation { - std::vector inputs; - std::vector outputs; - uint32_t len; + Circuit::HeapVector message; + Circuit::HeapArray output; + Circuit::MemoryAddress len; friend bool operator==(const Poseidon2Permutation&, const Poseidon2Permutation&); std::vector bincodeSerialize() const; @@ -266,231 +352,320 @@ namespace Circuit { }; struct Sha256Compression { - std::vector inputs; - std::vector hash_values; - std::vector outputs; + Circuit::HeapVector input; + Circuit::HeapVector hash_values; + Circuit::HeapArray output; friend bool operator==(const Sha256Compression&, const Sha256Compression&); std::vector bincodeSerialize() const; static Sha256Compression bincodeDeserialize(std::vector); }; - std::variant value; - - friend bool operator==(const BlackBoxFuncCall&, const BlackBoxFuncCall&); - std::vector bincodeSerialize() const; - static BlackBoxFuncCall bincodeDeserialize(std::vector); - }; - - struct BlockId { - uint32_t value; - - friend bool operator==(const BlockId&, const BlockId&); - std::vector bincodeSerialize() const; - static BlockId bincodeDeserialize(std::vector); - }; - - struct Expression { - std::vector> mul_terms; - std::vector> linear_combinations; - std::string q_c; + std::variant value; - friend bool operator==(const Expression&, const Expression&); + friend bool operator==(const BlackBoxOp&, const BlackBoxOp&); std::vector bincodeSerialize() const; - static Expression bincodeDeserialize(std::vector); + static BlackBoxOp bincodeDeserialize(std::vector); }; - struct BrilligInputs { + struct HeapValueType; - struct Single { - Circuit::Expression value; + struct HeapValueType { - friend bool operator==(const Single&, const Single&); + struct Simple { + friend bool operator==(const Simple&, const Simple&); std::vector bincodeSerialize() const; - static Single bincodeDeserialize(std::vector); + static Simple bincodeDeserialize(std::vector); }; struct Array { - std::vector value; + std::vector value_types; + uint64_t size; friend bool operator==(const Array&, const Array&); std::vector bincodeSerialize() const; static Array bincodeDeserialize(std::vector); }; - std::variant value; + struct Vector { + std::vector value_types; - friend bool operator==(const BrilligInputs&, const BrilligInputs&); + friend bool 
operator==(const Vector&, const Vector&); + std::vector bincodeSerialize() const; + static Vector bincodeDeserialize(std::vector); + }; + + std::variant value; + + friend bool operator==(const HeapValueType&, const HeapValueType&); std::vector bincodeSerialize() const; - static BrilligInputs bincodeDeserialize(std::vector); + static HeapValueType bincodeDeserialize(std::vector); }; - struct BinaryFieldOp { + struct Value { + std::string inner; - struct Add { - friend bool operator==(const Add&, const Add&); - std::vector bincodeSerialize() const; - static Add bincodeDeserialize(std::vector); - }; + friend bool operator==(const Value&, const Value&); + std::vector bincodeSerialize() const; + static Value bincodeDeserialize(std::vector); + }; - struct Sub { - friend bool operator==(const Sub&, const Sub&); - std::vector bincodeSerialize() const; - static Sub bincodeDeserialize(std::vector); - }; + struct ValueOrArray { - struct Mul { - friend bool operator==(const Mul&, const Mul&); + struct MemoryAddress { + Circuit::MemoryAddress value; + + friend bool operator==(const MemoryAddress&, const MemoryAddress&); std::vector bincodeSerialize() const; - static Mul bincodeDeserialize(std::vector); + static MemoryAddress bincodeDeserialize(std::vector); }; - struct Div { - friend bool operator==(const Div&, const Div&); + struct HeapArray { + Circuit::HeapArray value; + + friend bool operator==(const HeapArray&, const HeapArray&); std::vector bincodeSerialize() const; - static Div bincodeDeserialize(std::vector); + static HeapArray bincodeDeserialize(std::vector); }; - struct Equals { - friend bool operator==(const Equals&, const Equals&); + struct HeapVector { + Circuit::HeapVector value; + + friend bool operator==(const HeapVector&, const HeapVector&); std::vector bincodeSerialize() const; - static Equals bincodeDeserialize(std::vector); + static HeapVector bincodeDeserialize(std::vector); }; - std::variant value; + std::variant value; - friend bool operator==(const BinaryFieldOp&, const BinaryFieldOp&); + friend bool operator==(const ValueOrArray&, const ValueOrArray&); std::vector bincodeSerialize() const; - static BinaryFieldOp bincodeDeserialize(std::vector); + static ValueOrArray bincodeDeserialize(std::vector); }; - struct BinaryIntOp { + struct BrilligOpcode { - struct Add { - friend bool operator==(const Add&, const Add&); - std::vector bincodeSerialize() const; - static Add bincodeDeserialize(std::vector); - }; + struct BinaryFieldOp { + Circuit::MemoryAddress destination; + Circuit::BinaryFieldOp op; + Circuit::MemoryAddress lhs; + Circuit::MemoryAddress rhs; - struct Sub { - friend bool operator==(const Sub&, const Sub&); + friend bool operator==(const BinaryFieldOp&, const BinaryFieldOp&); std::vector bincodeSerialize() const; - static Sub bincodeDeserialize(std::vector); + static BinaryFieldOp bincodeDeserialize(std::vector); }; - struct Mul { - friend bool operator==(const Mul&, const Mul&); + struct BinaryIntOp { + Circuit::MemoryAddress destination; + Circuit::BinaryIntOp op; + uint32_t bit_size; + Circuit::MemoryAddress lhs; + Circuit::MemoryAddress rhs; + + friend bool operator==(const BinaryIntOp&, const BinaryIntOp&); std::vector bincodeSerialize() const; - static Mul bincodeDeserialize(std::vector); + static BinaryIntOp bincodeDeserialize(std::vector); }; - struct SignedDiv { - friend bool operator==(const SignedDiv&, const SignedDiv&); + struct Cast { + Circuit::MemoryAddress destination; + Circuit::MemoryAddress source; + uint32_t bit_size; + + friend bool operator==(const 
Cast&, const Cast&); std::vector bincodeSerialize() const; - static SignedDiv bincodeDeserialize(std::vector); + static Cast bincodeDeserialize(std::vector); }; - struct UnsignedDiv { - friend bool operator==(const UnsignedDiv&, const UnsignedDiv&); + struct JumpIfNot { + Circuit::MemoryAddress condition; + uint64_t location; + + friend bool operator==(const JumpIfNot&, const JumpIfNot&); std::vector bincodeSerialize() const; - static UnsignedDiv bincodeDeserialize(std::vector); + static JumpIfNot bincodeDeserialize(std::vector); }; - struct Equals { - friend bool operator==(const Equals&, const Equals&); + struct JumpIf { + Circuit::MemoryAddress condition; + uint64_t location; + + friend bool operator==(const JumpIf&, const JumpIf&); std::vector bincodeSerialize() const; - static Equals bincodeDeserialize(std::vector); + static JumpIf bincodeDeserialize(std::vector); }; - struct LessThan { - friend bool operator==(const LessThan&, const LessThan&); + struct Jump { + uint64_t location; + + friend bool operator==(const Jump&, const Jump&); std::vector bincodeSerialize() const; - static LessThan bincodeDeserialize(std::vector); + static Jump bincodeDeserialize(std::vector); }; - struct LessThanEquals { - friend bool operator==(const LessThanEquals&, const LessThanEquals&); + struct CalldataCopy { + Circuit::MemoryAddress destination_address; + uint64_t size; + uint64_t offset; + + friend bool operator==(const CalldataCopy&, const CalldataCopy&); std::vector bincodeSerialize() const; - static LessThanEquals bincodeDeserialize(std::vector); + static CalldataCopy bincodeDeserialize(std::vector); }; - struct And { - friend bool operator==(const And&, const And&); + struct Call { + uint64_t location; + + friend bool operator==(const Call&, const Call&); std::vector bincodeSerialize() const; - static And bincodeDeserialize(std::vector); + static Call bincodeDeserialize(std::vector); }; - struct Or { - friend bool operator==(const Or&, const Or&); + struct Const { + Circuit::MemoryAddress destination; + uint32_t bit_size; + Circuit::Value value; + + friend bool operator==(const Const&, const Const&); std::vector bincodeSerialize() const; - static Or bincodeDeserialize(std::vector); + static Const bincodeDeserialize(std::vector); }; - struct Xor { - friend bool operator==(const Xor&, const Xor&); + struct Return { + friend bool operator==(const Return&, const Return&); std::vector bincodeSerialize() const; - static Xor bincodeDeserialize(std::vector); + static Return bincodeDeserialize(std::vector); }; - struct Shl { - friend bool operator==(const Shl&, const Shl&); + struct ForeignCall { + std::string function; + std::vector destinations; + std::vector destination_value_types; + std::vector inputs; + std::vector input_value_types; + + friend bool operator==(const ForeignCall&, const ForeignCall&); std::vector bincodeSerialize() const; - static Shl bincodeDeserialize(std::vector); + static ForeignCall bincodeDeserialize(std::vector); }; - struct Shr { - friend bool operator==(const Shr&, const Shr&); + struct Mov { + Circuit::MemoryAddress destination; + Circuit::MemoryAddress source; + + friend bool operator==(const Mov&, const Mov&); std::vector bincodeSerialize() const; - static Shr bincodeDeserialize(std::vector); + static Mov bincodeDeserialize(std::vector); }; - std::variant value; + struct Load { + Circuit::MemoryAddress destination; + Circuit::MemoryAddress source_pointer; - friend bool operator==(const BinaryIntOp&, const BinaryIntOp&); - std::vector bincodeSerialize() const; - static 
BinaryIntOp bincodeDeserialize(std::vector); - }; + friend bool operator==(const Load&, const Load&); + std::vector bincodeSerialize() const; + static Load bincodeDeserialize(std::vector); + }; - struct RegisterIndex { - uint64_t value; + struct Store { + Circuit::MemoryAddress destination_pointer; + Circuit::MemoryAddress source; + + friend bool operator==(const Store&, const Store&); + std::vector bincodeSerialize() const; + static Store bincodeDeserialize(std::vector); + }; + + struct BlackBox { + Circuit::BlackBoxOp value; + + friend bool operator==(const BlackBox&, const BlackBox&); + std::vector bincodeSerialize() const; + static BlackBox bincodeDeserialize(std::vector); + }; + + struct Trap { + friend bool operator==(const Trap&, const Trap&); + std::vector bincodeSerialize() const; + static Trap bincodeDeserialize(std::vector); + }; + + struct Stop { + uint64_t return_data_offset; + uint64_t return_data_size; + + friend bool operator==(const Stop&, const Stop&); + std::vector bincodeSerialize() const; + static Stop bincodeDeserialize(std::vector); + }; - friend bool operator==(const RegisterIndex&, const RegisterIndex&); + std::variant value; + + friend bool operator==(const BrilligOpcode&, const BrilligOpcode&); std::vector bincodeSerialize() const; - static RegisterIndex bincodeDeserialize(std::vector); + static BrilligOpcode bincodeDeserialize(std::vector); }; - struct HeapArray { - Circuit::RegisterIndex pointer; - uint64_t size; + struct Witness { + uint32_t value; - friend bool operator==(const HeapArray&, const HeapArray&); + friend bool operator==(const Witness&, const Witness&); std::vector bincodeSerialize() const; - static HeapArray bincodeDeserialize(std::vector); + static Witness bincodeDeserialize(std::vector); }; - struct HeapVector { - Circuit::RegisterIndex pointer; - Circuit::RegisterIndex size; + struct FunctionInput { + Circuit::Witness witness; + uint32_t num_bits; - friend bool operator==(const HeapVector&, const HeapVector&); + friend bool operator==(const FunctionInput&, const FunctionInput&); std::vector bincodeSerialize() const; - static HeapVector bincodeDeserialize(std::vector); + static FunctionInput bincodeDeserialize(std::vector); }; - struct BlackBoxOp { + struct BlackBoxFuncCall { - struct Sha256 { - Circuit::HeapVector message; - Circuit::HeapArray output; + struct AND { + Circuit::FunctionInput lhs; + Circuit::FunctionInput rhs; + Circuit::Witness output; - friend bool operator==(const Sha256&, const Sha256&); + friend bool operator==(const AND&, const AND&); std::vector bincodeSerialize() const; - static Sha256 bincodeDeserialize(std::vector); + static AND bincodeDeserialize(std::vector); + }; + + struct XOR { + Circuit::FunctionInput lhs; + Circuit::FunctionInput rhs; + Circuit::Witness output; + + friend bool operator==(const XOR&, const XOR&); + std::vector bincodeSerialize() const; + static XOR bincodeDeserialize(std::vector); + }; + + struct RANGE { + Circuit::FunctionInput input; + + friend bool operator==(const RANGE&, const RANGE&); + std::vector bincodeSerialize() const; + static RANGE bincodeDeserialize(std::vector); + }; + + struct SHA256 { + std::vector inputs; + std::vector outputs; + + friend bool operator==(const SHA256&, const SHA256&); + std::vector bincodeSerialize() const; + static SHA256 bincodeDeserialize(std::vector); }; struct Blake2s { - Circuit::HeapVector message; - Circuit::HeapArray output; + std::vector inputs; + std::vector outputs; friend bool operator==(const Blake2s&, const Blake2s&); std::vector 
bincodeSerialize() const; @@ -498,38 +673,52 @@ namespace Circuit { }; struct Blake3 { - Circuit::HeapVector message; - Circuit::HeapArray output; + std::vector inputs; + std::vector outputs; friend bool operator==(const Blake3&, const Blake3&); std::vector bincodeSerialize() const; static Blake3 bincodeDeserialize(std::vector); }; - struct Keccak256 { - Circuit::HeapVector message; - Circuit::HeapArray output; + struct SchnorrVerify { + Circuit::FunctionInput public_key_x; + Circuit::FunctionInput public_key_y; + std::vector signature; + std::vector message; + Circuit::Witness output; - friend bool operator==(const Keccak256&, const Keccak256&); + friend bool operator==(const SchnorrVerify&, const SchnorrVerify&); std::vector bincodeSerialize() const; - static Keccak256 bincodeDeserialize(std::vector); + static SchnorrVerify bincodeDeserialize(std::vector); }; - struct Keccakf1600 { - Circuit::HeapVector message; - Circuit::HeapArray output; + struct PedersenCommitment { + std::vector inputs; + uint32_t domain_separator; + std::array outputs; - friend bool operator==(const Keccakf1600&, const Keccakf1600&); + friend bool operator==(const PedersenCommitment&, const PedersenCommitment&); std::vector bincodeSerialize() const; - static Keccakf1600 bincodeDeserialize(std::vector); + static PedersenCommitment bincodeDeserialize(std::vector); + }; + + struct PedersenHash { + std::vector inputs; + uint32_t domain_separator; + Circuit::Witness output; + + friend bool operator==(const PedersenHash&, const PedersenHash&); + std::vector bincodeSerialize() const; + static PedersenHash bincodeDeserialize(std::vector); }; struct EcdsaSecp256k1 { - Circuit::HeapVector hashed_msg; - Circuit::HeapArray public_key_x; - Circuit::HeapArray public_key_y; - Circuit::HeapArray signature; - Circuit::RegisterIndex result; + std::vector public_key_x; + std::vector public_key_y; + std::vector signature; + std::vector hashed_message; + Circuit::Witness output; friend bool operator==(const EcdsaSecp256k1&, const EcdsaSecp256k1&); std::vector bincodeSerialize() const; @@ -537,95 +726,102 @@ namespace Circuit { }; struct EcdsaSecp256r1 { - Circuit::HeapVector hashed_msg; - Circuit::HeapArray public_key_x; - Circuit::HeapArray public_key_y; - Circuit::HeapArray signature; - Circuit::RegisterIndex result; + std::vector public_key_x; + std::vector public_key_y; + std::vector signature; + std::vector hashed_message; + Circuit::Witness output; friend bool operator==(const EcdsaSecp256r1&, const EcdsaSecp256r1&); std::vector bincodeSerialize() const; static EcdsaSecp256r1 bincodeDeserialize(std::vector); }; - struct SchnorrVerify { - Circuit::RegisterIndex public_key_x; - Circuit::RegisterIndex public_key_y; - Circuit::HeapVector message; - Circuit::HeapVector signature; - Circuit::RegisterIndex result; + struct FixedBaseScalarMul { + Circuit::FunctionInput low; + Circuit::FunctionInput high; + std::array outputs; - friend bool operator==(const SchnorrVerify&, const SchnorrVerify&); + friend bool operator==(const FixedBaseScalarMul&, const FixedBaseScalarMul&); std::vector bincodeSerialize() const; - static SchnorrVerify bincodeDeserialize(std::vector); + static FixedBaseScalarMul bincodeDeserialize(std::vector); }; - struct PedersenCommitment { - Circuit::HeapVector inputs; - Circuit::RegisterIndex domain_separator; - Circuit::HeapArray output; + struct EmbeddedCurveAdd { + Circuit::FunctionInput input1_x; + Circuit::FunctionInput input1_y; + Circuit::FunctionInput input2_x; + Circuit::FunctionInput input2_y; + std::array 
outputs; - friend bool operator==(const PedersenCommitment&, const PedersenCommitment&); + friend bool operator==(const EmbeddedCurveAdd&, const EmbeddedCurveAdd&); std::vector bincodeSerialize() const; - static PedersenCommitment bincodeDeserialize(std::vector); + static EmbeddedCurveAdd bincodeDeserialize(std::vector); }; - struct PedersenHash { - Circuit::HeapVector inputs; - Circuit::RegisterIndex domain_separator; - Circuit::RegisterIndex output; + struct Keccak256 { + std::vector inputs; + std::vector outputs; - friend bool operator==(const PedersenHash&, const PedersenHash&); + friend bool operator==(const Keccak256&, const Keccak256&); std::vector bincodeSerialize() const; - static PedersenHash bincodeDeserialize(std::vector); + static Keccak256 bincodeDeserialize(std::vector); }; - struct FixedBaseScalarMul { - Circuit::RegisterIndex low; - Circuit::RegisterIndex high; - Circuit::HeapArray result; + struct Keccak256VariableLength { + std::vector inputs; + Circuit::FunctionInput var_message_size; + std::vector outputs; - friend bool operator==(const FixedBaseScalarMul&, const FixedBaseScalarMul&); + friend bool operator==(const Keccak256VariableLength&, const Keccak256VariableLength&); std::vector bincodeSerialize() const; - static FixedBaseScalarMul bincodeDeserialize(std::vector); + static Keccak256VariableLength bincodeDeserialize(std::vector); }; - struct EmbeddedCurveAdd { - Circuit::RegisterIndex input1_x; - Circuit::RegisterIndex input1_y; - Circuit::RegisterIndex input2_x; - Circuit::RegisterIndex input2_y; - Circuit::HeapArray result; + struct Keccakf1600 { + std::vector inputs; + std::vector outputs; - friend bool operator==(const EmbeddedCurveAdd&, const EmbeddedCurveAdd&); + friend bool operator==(const Keccakf1600&, const Keccakf1600&); std::vector bincodeSerialize() const; - static EmbeddedCurveAdd bincodeDeserialize(std::vector); + static Keccakf1600 bincodeDeserialize(std::vector); + }; + + struct RecursiveAggregation { + std::vector verification_key; + std::vector proof; + std::vector public_inputs; + Circuit::FunctionInput key_hash; + + friend bool operator==(const RecursiveAggregation&, const RecursiveAggregation&); + std::vector bincodeSerialize() const; + static RecursiveAggregation bincodeDeserialize(std::vector); }; struct BigIntAdd { - Circuit::RegisterIndex lhs; - Circuit::RegisterIndex rhs; - Circuit::RegisterIndex output; + uint32_t lhs; + uint32_t rhs; + uint32_t output; friend bool operator==(const BigIntAdd&, const BigIntAdd&); std::vector bincodeSerialize() const; static BigIntAdd bincodeDeserialize(std::vector); }; - struct BigIntNeg { - Circuit::RegisterIndex lhs; - Circuit::RegisterIndex rhs; - Circuit::RegisterIndex output; + struct BigIntSub { + uint32_t lhs; + uint32_t rhs; + uint32_t output; - friend bool operator==(const BigIntNeg&, const BigIntNeg&); + friend bool operator==(const BigIntSub&, const BigIntSub&); std::vector bincodeSerialize() const; - static BigIntNeg bincodeDeserialize(std::vector); + static BigIntSub bincodeDeserialize(std::vector); }; struct BigIntMul { - Circuit::RegisterIndex lhs; - Circuit::RegisterIndex rhs; - Circuit::RegisterIndex output; + uint32_t lhs; + uint32_t rhs; + uint32_t output; friend bool operator==(const BigIntMul&, const BigIntMul&); std::vector bincodeSerialize() const; @@ -633,9 +829,9 @@ namespace Circuit { }; struct BigIntDiv { - Circuit::RegisterIndex lhs; - Circuit::RegisterIndex rhs; - Circuit::RegisterIndex output; + uint32_t lhs; + uint32_t rhs; + uint32_t output; friend bool operator==(const 
BigIntDiv&, const BigIntDiv&); std::vector bincodeSerialize() const; @@ -643,9 +839,9 @@ namespace Circuit { }; struct BigIntFromLeBytes { - Circuit::HeapVector inputs; - Circuit::HeapVector modulus; - Circuit::RegisterIndex output; + std::vector inputs; + std::vector modulus; + uint32_t output; friend bool operator==(const BigIntFromLeBytes&, const BigIntFromLeBytes&); std::vector bincodeSerialize() const; @@ -653,8 +849,8 @@ namespace Circuit { }; struct BigIntToLeBytes { - Circuit::RegisterIndex input; - Circuit::HeapVector output; + uint32_t input; + std::vector outputs; friend bool operator==(const BigIntToLeBytes&, const BigIntToLeBytes&); std::vector bincodeSerialize() const; @@ -662,9 +858,9 @@ namespace Circuit { }; struct Poseidon2Permutation { - Circuit::HeapVector message; - Circuit::HeapArray output; - Circuit::RegisterIndex len; + std::vector inputs; + std::vector outputs; + uint32_t len; friend bool operator==(const Poseidon2Permutation&, const Poseidon2Permutation&); std::vector bincodeSerialize() const; @@ -672,199 +868,71 @@ namespace Circuit { }; struct Sha256Compression { - Circuit::HeapVector input; - Circuit::HeapVector hash_values; - Circuit::HeapArray output; + std::vector inputs; + std::vector hash_values; + std::vector outputs; friend bool operator==(const Sha256Compression&, const Sha256Compression&); std::vector bincodeSerialize() const; static Sha256Compression bincodeDeserialize(std::vector); }; - std::variant value; + std::variant value; - friend bool operator==(const BlackBoxOp&, const BlackBoxOp&); + friend bool operator==(const BlackBoxFuncCall&, const BlackBoxFuncCall&); std::vector bincodeSerialize() const; - static BlackBoxOp bincodeDeserialize(std::vector); + static BlackBoxFuncCall bincodeDeserialize(std::vector); }; - struct RegisterOrMemory { - - struct RegisterIndex { - Circuit::RegisterIndex value; - - friend bool operator==(const RegisterIndex&, const RegisterIndex&); - std::vector bincodeSerialize() const; - static RegisterIndex bincodeDeserialize(std::vector); - }; - - struct HeapArray { - Circuit::HeapArray value; - - friend bool operator==(const HeapArray&, const HeapArray&); - std::vector bincodeSerialize() const; - static HeapArray bincodeDeserialize(std::vector); - }; - - struct HeapVector { - Circuit::HeapVector value; - - friend bool operator==(const HeapVector&, const HeapVector&); - std::vector bincodeSerialize() const; - static HeapVector bincodeDeserialize(std::vector); - }; - - std::variant value; + struct BlockId { + uint32_t value; - friend bool operator==(const RegisterOrMemory&, const RegisterOrMemory&); + friend bool operator==(const BlockId&, const BlockId&); std::vector bincodeSerialize() const; - static RegisterOrMemory bincodeDeserialize(std::vector); + static BlockId bincodeDeserialize(std::vector); }; - struct Value { - std::string inner; + struct Expression { + std::vector> mul_terms; + std::vector> linear_combinations; + std::string q_c; - friend bool operator==(const Value&, const Value&); + friend bool operator==(const Expression&, const Expression&); std::vector bincodeSerialize() const; - static Value bincodeDeserialize(std::vector); + static Expression bincodeDeserialize(std::vector); }; - struct BrilligOpcode { - - struct BinaryFieldOp { - Circuit::RegisterIndex destination; - Circuit::BinaryFieldOp op; - Circuit::RegisterIndex lhs; - Circuit::RegisterIndex rhs; - - friend bool operator==(const BinaryFieldOp&, const BinaryFieldOp&); - std::vector bincodeSerialize() const; - static BinaryFieldOp 
-    };
-
-    struct BinaryIntOp {
-        Circuit::RegisterIndex destination;
-        Circuit::BinaryIntOp op;
-        uint32_t bit_size;
-        Circuit::RegisterIndex lhs;
-        Circuit::RegisterIndex rhs;
-
-        friend bool operator==(const BinaryIntOp&, const BinaryIntOp&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static BinaryIntOp bincodeDeserialize(std::vector<uint8_t>);
-    };
-
-    struct JumpIfNot {
-        Circuit::RegisterIndex condition;
-        uint64_t location;
-
-        friend bool operator==(const JumpIfNot&, const JumpIfNot&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static JumpIfNot bincodeDeserialize(std::vector<uint8_t>);
-    };
-
-    struct JumpIf {
-        Circuit::RegisterIndex condition;
-        uint64_t location;
-
-        friend bool operator==(const JumpIf&, const JumpIf&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static JumpIf bincodeDeserialize(std::vector<uint8_t>);
-    };
-
-    struct Jump {
-        uint64_t location;
-
-        friend bool operator==(const Jump&, const Jump&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static Jump bincodeDeserialize(std::vector<uint8_t>);
-    };
-
-    struct Call {
-        uint64_t location;
-
-        friend bool operator==(const Call&, const Call&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static Call bincodeDeserialize(std::vector<uint8_t>);
-    };
-
-    struct Const {
-        Circuit::RegisterIndex destination;
-        Circuit::Value value;
-
-        friend bool operator==(const Const&, const Const&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static Const bincodeDeserialize(std::vector<uint8_t>);
-    };
-
-    struct Return {
-        friend bool operator==(const Return&, const Return&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static Return bincodeDeserialize(std::vector<uint8_t>);
-    };
-
-    struct ForeignCall {
-        std::string function;
-        std::vector destinations;
-        std::vector inputs;
-
-        friend bool operator==(const ForeignCall&, const ForeignCall&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static ForeignCall bincodeDeserialize(std::vector<uint8_t>);
-    };
-
-    struct Mov {
-        Circuit::RegisterIndex destination;
-        Circuit::RegisterIndex source;
-
-        friend bool operator==(const Mov&, const Mov&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static Mov bincodeDeserialize(std::vector<uint8_t>);
-    };
-
-    struct Load {
-        Circuit::RegisterIndex destination;
-        Circuit::RegisterIndex source_pointer;
-
-        friend bool operator==(const Load&, const Load&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static Load bincodeDeserialize(std::vector<uint8_t>);
-    };
+struct BrilligInputs {

-    struct Store {
-        Circuit::RegisterIndex destination_pointer;
-        Circuit::RegisterIndex source;
+    struct Single {
+        Circuit::Expression value;

-        friend bool operator==(const Store&, const Store&);
+        friend bool operator==(const Single&, const Single&);
         std::vector<uint8_t> bincodeSerialize() const;
-        static Store bincodeDeserialize(std::vector<uint8_t>);
+        static Single bincodeDeserialize(std::vector<uint8_t>);
     };

-    struct BlackBox {
-        Circuit::BlackBoxOp value;
+    struct Array {
+        std::vector value;

-        friend bool operator==(const BlackBox&, const BlackBox&);
+        friend bool operator==(const Array&, const Array&);
         std::vector<uint8_t> bincodeSerialize() const;
-        static BlackBox bincodeDeserialize(std::vector<uint8_t>);
+        static Array bincodeDeserialize(std::vector<uint8_t>);
     };

-    struct Trap {
-        friend bool operator==(const Trap&, const Trap&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static Trap bincodeDeserialize(std::vector<uint8_t>);
-    };
+    struct MemoryArray {
+        Circuit::BlockId value;

-    struct Stop {
-        friend bool operator==(const Stop&, const Stop&);
+        friend bool operator==(const MemoryArray&, const MemoryArray&);
         std::vector<uint8_t> bincodeSerialize() const;
-        static Stop bincodeDeserialize(std::vector<uint8_t>);
+        static MemoryArray bincodeDeserialize(std::vector<uint8_t>);
     };

-    std::variant value;
+    std::variant value;

-    friend bool operator==(const BrilligOpcode&, const BrilligOpcode&);
+    friend bool operator==(const BrilligInputs&, const BrilligInputs&);
     std::vector<uint8_t> bincodeSerialize() const;
-    static BrilligOpcode bincodeDeserialize(std::vector<uint8_t>);
+    static BrilligInputs bincodeDeserialize(std::vector<uint8_t>);
 };

 struct BrilligOutputs {
@@ -915,18 +983,7 @@ namespace Circuit {
         static ToLeRadix bincodeDeserialize(std::vector<uint8_t>);
     };

-    struct PermutationSort {
-        std::vector> inputs;
-        uint32_t tuple;
-        std::vector bits;
-        std::vector sort_by;
-
-        friend bool operator==(const PermutationSort&, const PermutationSort&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static PermutationSort bincodeDeserialize(std::vector<uint8_t>);
-    };
-
-    std::variant value;
+    std::variant value;

     friend bool operator==(const Directive&, const Directive&);
     std::vector<uint8_t> bincodeSerialize() const;
@@ -991,84 +1048,437 @@ namespace Circuit {
         Circuit::BlockId block_id;
         std::vector init;

-        friend bool operator==(const MemoryInit&, const MemoryInit&);
-        std::vector<uint8_t> bincodeSerialize() const;
-        static MemoryInit bincodeDeserialize(std::vector<uint8_t>);
-    };
+        friend bool operator==(const MemoryInit&, const MemoryInit&);
+        std::vector<uint8_t> bincodeSerialize() const;
+        static MemoryInit bincodeDeserialize(std::vector<uint8_t>);
+    };
+
+    std::variant value;
+
+    friend bool operator==(const Opcode&, const Opcode&);
+    std::vector<uint8_t> bincodeSerialize() const;
+    static Opcode bincodeDeserialize(std::vector<uint8_t>);
+};
+
+struct ExpressionWidth {
+
+    struct Unbounded {
+        friend bool operator==(const Unbounded&, const Unbounded&);
+        std::vector<uint8_t> bincodeSerialize() const;
+        static Unbounded bincodeDeserialize(std::vector<uint8_t>);
+    };
+
+    struct Bounded {
+        uint64_t width;
+
+        friend bool operator==(const Bounded&, const Bounded&);
+        std::vector<uint8_t> bincodeSerialize() const;
+        static Bounded bincodeDeserialize(std::vector<uint8_t>);
+    };
+
+    std::variant value;
+
+    friend bool operator==(const ExpressionWidth&, const ExpressionWidth&);
+    std::vector<uint8_t> bincodeSerialize() const;
+    static ExpressionWidth bincodeDeserialize(std::vector<uint8_t>);
+};
+
+struct OpcodeLocation {
+
+    struct Acir {
+        uint64_t value;
+
+        friend bool operator==(const Acir&, const Acir&);
+        std::vector<uint8_t> bincodeSerialize() const;
+        static Acir bincodeDeserialize(std::vector<uint8_t>);
+    };
+
+    struct Brillig {
+        uint64_t acir_index;
+        uint64_t brillig_index;
+
+        friend bool operator==(const Brillig&, const Brillig&);
+        std::vector<uint8_t> bincodeSerialize() const;
+        static Brillig bincodeDeserialize(std::vector<uint8_t>);
+    };
+
+    std::variant value;
+
+    friend bool operator==(const OpcodeLocation&, const OpcodeLocation&);
+    std::vector<uint8_t> bincodeSerialize() const;
+    static OpcodeLocation bincodeDeserialize(std::vector<uint8_t>);
+};
+
+struct PublicInputs {
+    std::vector value;
+
+    friend bool operator==(const PublicInputs&, const PublicInputs&);
+    std::vector<uint8_t> bincodeSerialize() const;
+    static PublicInputs bincodeDeserialize(std::vector<uint8_t>);
+};
+
+struct Circuit {
+    uint32_t current_witness_index;
+    std::vector opcodes;
+    Circuit::ExpressionWidth expression_width;
+    std::vector private_parameters;
+    Circuit::PublicInputs public_parameters;
+    Circuit::PublicInputs return_values;
+    std::vector> assert_messages;
+    bool recursive;
+
+    friend bool operator==(const Circuit&, const Circuit&);
+    std::vector<uint8_t> bincodeSerialize() const;
+    static Circuit bincodeDeserialize(std::vector<uint8_t>);
+};
+
+} // end of namespace Circuit
+
+
+namespace Circuit {
+
+    inline bool operator==(const BinaryFieldOp &lhs, const
BinaryFieldOp &rhs) { + if (!(lhs.value == rhs.value)) { return false; } + return true; + } + + inline std::vector BinaryFieldOp::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } + + inline BinaryFieldOp BinaryFieldOp::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw serde::deserialization_error("Some input bytes were not read"); + } + return value; + } + +} // end of namespace Circuit + +template <> +template +void serde::Serializable::serialize(const Circuit::BinaryFieldOp &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); +} + +template <> +template +Circuit::BinaryFieldOp serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::BinaryFieldOp obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); + return obj; +} + +namespace Circuit { + + inline bool operator==(const BinaryFieldOp::Add &lhs, const BinaryFieldOp::Add &rhs) { + return true; + } + + inline std::vector BinaryFieldOp::Add::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } + + inline BinaryFieldOp::Add BinaryFieldOp::Add::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw serde::deserialization_error("Some input bytes were not read"); + } + return value; + } + +} // end of namespace Circuit + +template <> +template +void serde::Serializable::serialize(const Circuit::BinaryFieldOp::Add &obj, Serializer &serializer) { +} + +template <> +template +Circuit::BinaryFieldOp::Add serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryFieldOp::Add obj; + return obj; +} + +namespace Circuit { + + inline bool operator==(const BinaryFieldOp::Sub &lhs, const BinaryFieldOp::Sub &rhs) { + return true; + } + + inline std::vector BinaryFieldOp::Sub::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } + + inline BinaryFieldOp::Sub BinaryFieldOp::Sub::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw serde::deserialization_error("Some input bytes were not read"); + } + return value; + } + +} // end of namespace Circuit + +template <> +template +void serde::Serializable::serialize(const Circuit::BinaryFieldOp::Sub &obj, Serializer &serializer) { +} + +template <> +template +Circuit::BinaryFieldOp::Sub serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryFieldOp::Sub obj; + return obj; +} + +namespace Circuit { + + inline bool operator==(const BinaryFieldOp::Mul &lhs, const BinaryFieldOp::Mul &rhs) { + return true; + } + + inline std::vector 
BinaryFieldOp::Mul::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } + + inline BinaryFieldOp::Mul BinaryFieldOp::Mul::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw serde::deserialization_error("Some input bytes were not read"); + } + return value; + } + +} // end of namespace Circuit + +template <> +template +void serde::Serializable::serialize(const Circuit::BinaryFieldOp::Mul &obj, Serializer &serializer) { +} + +template <> +template +Circuit::BinaryFieldOp::Mul serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryFieldOp::Mul obj; + return obj; +} + +namespace Circuit { + + inline bool operator==(const BinaryFieldOp::Div &lhs, const BinaryFieldOp::Div &rhs) { + return true; + } + + inline std::vector BinaryFieldOp::Div::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } + + inline BinaryFieldOp::Div BinaryFieldOp::Div::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw serde::deserialization_error("Some input bytes were not read"); + } + return value; + } + +} // end of namespace Circuit + +template <> +template +void serde::Serializable::serialize(const Circuit::BinaryFieldOp::Div &obj, Serializer &serializer) { +} + +template <> +template +Circuit::BinaryFieldOp::Div serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryFieldOp::Div obj; + return obj; +} + +namespace Circuit { + + inline bool operator==(const BinaryFieldOp::Equals &lhs, const BinaryFieldOp::Equals &rhs) { + return true; + } + + inline std::vector BinaryFieldOp::Equals::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } + + inline BinaryFieldOp::Equals BinaryFieldOp::Equals::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw serde::deserialization_error("Some input bytes were not read"); + } + return value; + } + +} // end of namespace Circuit + +template <> +template +void serde::Serializable::serialize(const Circuit::BinaryFieldOp::Equals &obj, Serializer &serializer) { +} + +template <> +template +Circuit::BinaryFieldOp::Equals serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryFieldOp::Equals obj; + return obj; +} + +namespace Circuit { + + inline bool operator==(const BinaryIntOp &lhs, const BinaryIntOp &rhs) { + if (!(lhs.value == rhs.value)) { return false; } + return true; + } + + inline std::vector BinaryIntOp::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } + + inline BinaryIntOp BinaryIntOp::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = 
serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw serde::deserialization_error("Some input bytes were not read"); + } + return value; + } - std::variant value; +} // end of namespace Circuit - friend bool operator==(const Opcode&, const Opcode&); - std::vector bincodeSerialize() const; - static Opcode bincodeDeserialize(std::vector); - }; +template <> +template +void serde::Serializable::serialize(const Circuit::BinaryIntOp &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); +} - struct OpcodeLocation { +template <> +template +Circuit::BinaryIntOp serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::BinaryIntOp obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); + return obj; +} - struct Acir { - uint64_t value; +namespace Circuit { - friend bool operator==(const Acir&, const Acir&); - std::vector bincodeSerialize() const; - static Acir bincodeDeserialize(std::vector); - }; + inline bool operator==(const BinaryIntOp::Add &lhs, const BinaryIntOp::Add &rhs) { + return true; + } - struct Brillig { - uint64_t acir_index; - uint64_t brillig_index; + inline std::vector BinaryIntOp::Add::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } - friend bool operator==(const Brillig&, const Brillig&); - std::vector bincodeSerialize() const; - static Brillig bincodeDeserialize(std::vector); - }; + inline BinaryIntOp::Add BinaryIntOp::Add::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw serde::deserialization_error("Some input bytes were not read"); + } + return value; + } - std::variant value; +} // end of namespace Circuit - friend bool operator==(const OpcodeLocation&, const OpcodeLocation&); - std::vector bincodeSerialize() const; - static OpcodeLocation bincodeDeserialize(std::vector); - }; +template <> +template +void serde::Serializable::serialize(const Circuit::BinaryIntOp::Add &obj, Serializer &serializer) { +} - struct PublicInputs { - std::vector value; +template <> +template +Circuit::BinaryIntOp::Add serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::Add obj; + return obj; +} - friend bool operator==(const PublicInputs&, const PublicInputs&); - std::vector bincodeSerialize() const; - static PublicInputs bincodeDeserialize(std::vector); - }; +namespace Circuit { - struct Circuit { - uint32_t current_witness_index; - std::vector opcodes; - std::vector private_parameters; - Circuit::PublicInputs public_parameters; - Circuit::PublicInputs return_values; - std::vector> assert_messages; + inline bool operator==(const BinaryIntOp::Sub &lhs, const BinaryIntOp::Sub &rhs) { + return true; + } - friend bool operator==(const Circuit&, const Circuit&); - std::vector bincodeSerialize() const; - static Circuit bincodeDeserialize(std::vector); - }; + inline std::vector BinaryIntOp::Sub::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } + + inline 
BinaryIntOp::Sub BinaryIntOp::Sub::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw serde::deserialization_error("Some input bytes were not read"); + } + return value; + } } // end of namespace Circuit +template <> +template +void serde::Serializable::serialize(const Circuit::BinaryIntOp::Sub &obj, Serializer &serializer) { +} + +template <> +template +Circuit::BinaryIntOp::Sub serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::Sub obj; + return obj; +} namespace Circuit { - inline bool operator==(const BinaryFieldOp &lhs, const BinaryFieldOp &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const BinaryIntOp::Mul &lhs, const BinaryIntOp::Mul &rhs) { return true; } - inline std::vector BinaryFieldOp::bincodeSerialize() const { + inline std::vector BinaryIntOp::Mul::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryFieldOp BinaryFieldOp::bincodeDeserialize(std::vector input) { + inline BinaryIntOp::Mul BinaryIntOp::Mul::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1079,37 +1489,31 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryFieldOp &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::BinaryIntOp::Mul &obj, Serializer &serializer) { } template <> template -Circuit::BinaryFieldOp serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::BinaryFieldOp obj; - obj.value = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); +Circuit::BinaryIntOp::Mul serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::Mul obj; return obj; } namespace Circuit { - inline bool operator==(const BinaryFieldOp::Add &lhs, const BinaryFieldOp::Add &rhs) { + inline bool operator==(const BinaryIntOp::SignedDiv &lhs, const BinaryIntOp::SignedDiv &rhs) { return true; } - inline std::vector BinaryFieldOp::Add::bincodeSerialize() const { + inline std::vector BinaryIntOp::SignedDiv::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryFieldOp::Add BinaryFieldOp::Add::bincodeDeserialize(std::vector input) { + inline BinaryIntOp::SignedDiv BinaryIntOp::SignedDiv::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw 
serde::deserialization_error("Some input bytes were not read"); } @@ -1120,31 +1524,31 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryFieldOp::Add &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BinaryIntOp::SignedDiv &obj, Serializer &serializer) { } template <> template -Circuit::BinaryFieldOp::Add serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryFieldOp::Add obj; +Circuit::BinaryIntOp::SignedDiv serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::SignedDiv obj; return obj; } namespace Circuit { - inline bool operator==(const BinaryFieldOp::Sub &lhs, const BinaryFieldOp::Sub &rhs) { + inline bool operator==(const BinaryIntOp::UnsignedDiv &lhs, const BinaryIntOp::UnsignedDiv &rhs) { return true; } - inline std::vector BinaryFieldOp::Sub::bincodeSerialize() const { + inline std::vector BinaryIntOp::UnsignedDiv::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryFieldOp::Sub BinaryFieldOp::Sub::bincodeDeserialize(std::vector input) { + inline BinaryIntOp::UnsignedDiv BinaryIntOp::UnsignedDiv::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1155,31 +1559,31 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryFieldOp::Sub &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BinaryIntOp::UnsignedDiv &obj, Serializer &serializer) { } template <> template -Circuit::BinaryFieldOp::Sub serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryFieldOp::Sub obj; +Circuit::BinaryIntOp::UnsignedDiv serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::UnsignedDiv obj; return obj; } namespace Circuit { - inline bool operator==(const BinaryFieldOp::Mul &lhs, const BinaryFieldOp::Mul &rhs) { + inline bool operator==(const BinaryIntOp::Equals &lhs, const BinaryIntOp::Equals &rhs) { return true; } - inline std::vector BinaryFieldOp::Mul::bincodeSerialize() const { + inline std::vector BinaryIntOp::Equals::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryFieldOp::Mul BinaryFieldOp::Mul::bincodeDeserialize(std::vector input) { + inline BinaryIntOp::Equals BinaryIntOp::Equals::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1190,31 +1594,31 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryFieldOp::Mul &obj, Serializer &serializer) { +void serde::Serializable::serialize(const 
Circuit::BinaryIntOp::Equals &obj, Serializer &serializer) { } template <> template -Circuit::BinaryFieldOp::Mul serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryFieldOp::Mul obj; +Circuit::BinaryIntOp::Equals serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::Equals obj; return obj; } namespace Circuit { - inline bool operator==(const BinaryFieldOp::Div &lhs, const BinaryFieldOp::Div &rhs) { + inline bool operator==(const BinaryIntOp::LessThan &lhs, const BinaryIntOp::LessThan &rhs) { return true; } - inline std::vector BinaryFieldOp::Div::bincodeSerialize() const { + inline std::vector BinaryIntOp::LessThan::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryFieldOp::Div BinaryFieldOp::Div::bincodeDeserialize(std::vector input) { + inline BinaryIntOp::LessThan BinaryIntOp::LessThan::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1225,31 +1629,31 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryFieldOp::Div &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BinaryIntOp::LessThan &obj, Serializer &serializer) { } template <> template -Circuit::BinaryFieldOp::Div serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryFieldOp::Div obj; +Circuit::BinaryIntOp::LessThan serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::LessThan obj; return obj; } namespace Circuit { - inline bool operator==(const BinaryFieldOp::Equals &lhs, const BinaryFieldOp::Equals &rhs) { + inline bool operator==(const BinaryIntOp::LessThanEquals &lhs, const BinaryIntOp::LessThanEquals &rhs) { return true; } - inline std::vector BinaryFieldOp::Equals::bincodeSerialize() const { + inline std::vector BinaryIntOp::LessThanEquals::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryFieldOp::Equals BinaryFieldOp::Equals::bincodeDeserialize(std::vector input) { + inline BinaryIntOp::LessThanEquals BinaryIntOp::LessThanEquals::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1260,32 +1664,31 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryFieldOp::Equals &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BinaryIntOp::LessThanEquals &obj, Serializer &serializer) { } template <> template -Circuit::BinaryFieldOp::Equals serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryFieldOp::Equals obj; +Circuit::BinaryIntOp::LessThanEquals 
serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::LessThanEquals obj; return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp &lhs, const BinaryIntOp &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const BinaryIntOp::And &lhs, const BinaryIntOp::And &rhs) { return true; } - inline std::vector BinaryIntOp::bincodeSerialize() const { + inline std::vector BinaryIntOp::And::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp BinaryIntOp::bincodeDeserialize(std::vector input) { + inline BinaryIntOp::And BinaryIntOp::And::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1296,37 +1699,31 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::BinaryIntOp::And &obj, Serializer &serializer) { } template <> template -Circuit::BinaryIntOp serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::BinaryIntOp obj; - obj.value = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); +Circuit::BinaryIntOp::And serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::And obj; return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::Add &lhs, const BinaryIntOp::Add &rhs) { + inline bool operator==(const BinaryIntOp::Or &lhs, const BinaryIntOp::Or &rhs) { return true; } - inline std::vector BinaryIntOp::Add::bincodeSerialize() const { + inline std::vector BinaryIntOp::Or::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::Add BinaryIntOp::Add::bincodeDeserialize(std::vector input) { + inline BinaryIntOp::Or BinaryIntOp::Or::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1337,31 +1734,31 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::Add &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BinaryIntOp::Or &obj, Serializer &serializer) { } template <> template -Circuit::BinaryIntOp::Add serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::Add obj; +Circuit::BinaryIntOp::Or serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::Or obj; return obj; } namespace 
Circuit { - inline bool operator==(const BinaryIntOp::Sub &lhs, const BinaryIntOp::Sub &rhs) { + inline bool operator==(const BinaryIntOp::Xor &lhs, const BinaryIntOp::Xor &rhs) { return true; } - inline std::vector BinaryIntOp::Sub::bincodeSerialize() const { + inline std::vector BinaryIntOp::Xor::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::Sub BinaryIntOp::Sub::bincodeDeserialize(std::vector input) { + inline BinaryIntOp::Xor BinaryIntOp::Xor::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1372,31 +1769,31 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::Sub &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BinaryIntOp::Xor &obj, Serializer &serializer) { } template <> template -Circuit::BinaryIntOp::Sub serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::Sub obj; +Circuit::BinaryIntOp::Xor serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::Xor obj; return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::Mul &lhs, const BinaryIntOp::Mul &rhs) { + inline bool operator==(const BinaryIntOp::Shl &lhs, const BinaryIntOp::Shl &rhs) { return true; } - inline std::vector BinaryIntOp::Mul::bincodeSerialize() const { + inline std::vector BinaryIntOp::Shl::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::Mul BinaryIntOp::Mul::bincodeDeserialize(std::vector input) { + inline BinaryIntOp::Shl BinaryIntOp::Shl::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1407,31 +1804,31 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::Mul &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BinaryIntOp::Shl &obj, Serializer &serializer) { } template <> template -Circuit::BinaryIntOp::Mul serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::Mul obj; +Circuit::BinaryIntOp::Shl serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::Shl obj; return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::SignedDiv &lhs, const BinaryIntOp::SignedDiv &rhs) { + inline bool operator==(const BinaryIntOp::Shr &lhs, const BinaryIntOp::Shr &rhs) { return true; } - inline std::vector BinaryIntOp::SignedDiv::bincodeSerialize() const { + inline std::vector BinaryIntOp::Shr::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - 
serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::SignedDiv BinaryIntOp::SignedDiv::bincodeDeserialize(std::vector input) { + inline BinaryIntOp::Shr BinaryIntOp::Shr::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1442,31 +1839,32 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::SignedDiv &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BinaryIntOp::Shr &obj, Serializer &serializer) { } template <> template -Circuit::BinaryIntOp::SignedDiv serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::SignedDiv obj; +Circuit::BinaryIntOp::Shr serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BinaryIntOp::Shr obj; return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::UnsignedDiv &lhs, const BinaryIntOp::UnsignedDiv &rhs) { + inline bool operator==(const BlackBoxFuncCall &lhs, const BlackBoxFuncCall &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BinaryIntOp::UnsignedDiv::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::UnsignedDiv BinaryIntOp::UnsignedDiv::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall BlackBoxFuncCall::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1477,31 +1875,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::UnsignedDiv &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::BinaryIntOp::UnsignedDiv serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::UnsignedDiv obj; +Circuit::BlackBoxFuncCall serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::BlackBoxFuncCall obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::Equals &lhs, const BinaryIntOp::Equals &rhs) { + inline bool operator==(const BlackBoxFuncCall::AND &lhs, const BlackBoxFuncCall::AND &rhs) { + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } + if (!(lhs.output == rhs.output)) { return false; } return true; } - inline 
std::vector BinaryIntOp::Equals::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::AND::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::Equals BinaryIntOp::Equals::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::AND BlackBoxFuncCall::AND::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1512,31 +1919,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::Equals &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::AND &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BinaryIntOp::Equals serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::Equals obj; +Circuit::BlackBoxFuncCall::AND serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::AND obj; + obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::LessThan &lhs, const BinaryIntOp::LessThan &rhs) { + inline bool operator==(const BlackBoxFuncCall::XOR &lhs, const BlackBoxFuncCall::XOR &rhs) { + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } + if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BinaryIntOp::LessThan::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::XOR::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::LessThan BinaryIntOp::LessThan::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::XOR BlackBoxFuncCall::XOR::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1547,31 +1963,38 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::LessThan &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::XOR &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BinaryIntOp::LessThan serde::Deserializable::deserialize(Deserializer &deserializer) { - 
Circuit::BinaryIntOp::LessThan obj; +Circuit::BlackBoxFuncCall::XOR serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::XOR obj; + obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::LessThanEquals &lhs, const BinaryIntOp::LessThanEquals &rhs) { + inline bool operator==(const BlackBoxFuncCall::RANGE &lhs, const BlackBoxFuncCall::RANGE &rhs) { + if (!(lhs.input == rhs.input)) { return false; } return true; } - inline std::vector BinaryIntOp::LessThanEquals::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::RANGE::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::LessThanEquals BinaryIntOp::LessThanEquals::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::RANGE BlackBoxFuncCall::RANGE::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1582,31 +2005,35 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::LessThanEquals &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::RANGE &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.input, serializer); } template <> template -Circuit::BinaryIntOp::LessThanEquals serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::LessThanEquals obj; +Circuit::BlackBoxFuncCall::RANGE serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::RANGE obj; + obj.input = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::And &lhs, const BinaryIntOp::And &rhs) { + inline bool operator==(const BlackBoxFuncCall::SHA256 &lhs, const BlackBoxFuncCall::SHA256 &rhs) { + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } - inline std::vector BinaryIntOp::And::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::SHA256::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::And BinaryIntOp::And::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::SHA256 BlackBoxFuncCall::SHA256::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1617,31 +2044,37 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::And &obj, 
Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::SHA256 &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Circuit::BinaryIntOp::And serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::And obj; +Circuit::BlackBoxFuncCall::SHA256 serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::SHA256 obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::Or &lhs, const BinaryIntOp::Or &rhs) { + inline bool operator==(const BlackBoxFuncCall::Blake2s &lhs, const BlackBoxFuncCall::Blake2s &rhs) { + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } - inline std::vector BinaryIntOp::Or::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::Blake2s::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::Or BinaryIntOp::Or::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::Blake2s BlackBoxFuncCall::Blake2s::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1652,31 +2085,37 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::Or &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Blake2s &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Circuit::BinaryIntOp::Or serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::Or obj; +Circuit::BlackBoxFuncCall::Blake2s serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::Blake2s obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::Xor &lhs, const BinaryIntOp::Xor &rhs) { + inline bool operator==(const BlackBoxFuncCall::Blake3 &lhs, const BlackBoxFuncCall::Blake3 &rhs) { + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } - inline std::vector BinaryIntOp::Xor::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::Blake3::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::Xor BinaryIntOp::Xor::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::Blake3 BlackBoxFuncCall::Blake3::bincodeDeserialize(std::vector input) { auto deserializer = 
serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1687,31 +2126,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::Xor &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Blake3 &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Circuit::BinaryIntOp::Xor serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::Xor obj; +Circuit::BlackBoxFuncCall::Blake3 serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::Blake3 obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::Shl &lhs, const BinaryIntOp::Shl &rhs) { + inline bool operator==(const BlackBoxFuncCall::SchnorrVerify &lhs, const BlackBoxFuncCall::SchnorrVerify &rhs) { + if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } + if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } + if (!(lhs.signature == rhs.signature)) { return false; } + if (!(lhs.message == rhs.message)) { return false; } + if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BinaryIntOp::Shl::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::SchnorrVerify::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::Shl BinaryIntOp::Shl::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::SchnorrVerify BlackBoxFuncCall::SchnorrVerify::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1722,31 +2170,44 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::Shl &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::SchnorrVerify &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.public_key_x, serializer); + serde::Serializable::serialize(obj.public_key_y, serializer); + serde::Serializable::serialize(obj.signature, serializer); + serde::Serializable::serialize(obj.message, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BinaryIntOp::Shl serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::Shl obj; +Circuit::BlackBoxFuncCall::SchnorrVerify serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::SchnorrVerify obj; + obj.public_key_x = serde::Deserializable::deserialize(deserializer); + obj.public_key_y = serde::Deserializable::deserialize(deserializer); + obj.signature = 
serde::Deserializable::deserialize(deserializer); + obj.message = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BinaryIntOp::Shr &lhs, const BinaryIntOp::Shr &rhs) { + inline bool operator==(const BlackBoxFuncCall::PedersenCommitment &lhs, const BlackBoxFuncCall::PedersenCommitment &rhs) { + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.domain_separator == rhs.domain_separator)) { return false; } + if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } - inline std::vector BinaryIntOp::Shr::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::PedersenCommitment::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BinaryIntOp::Shr BinaryIntOp::Shr::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::PedersenCommitment BlackBoxFuncCall::PedersenCommitment::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1757,32 +2218,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BinaryIntOp::Shr &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::PedersenCommitment &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.domain_separator, serializer); + serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Circuit::BinaryIntOp::Shr serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BinaryIntOp::Shr obj; +Circuit::BlackBoxFuncCall::PedersenCommitment serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::PedersenCommitment obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.domain_separator = serde::Deserializable::deserialize(deserializer); + obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall &lhs, const BlackBoxFuncCall &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const BlackBoxFuncCall::PedersenHash &lhs, const BlackBoxFuncCall::PedersenHash &rhs) { + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.domain_separator == rhs.domain_separator)) { return false; } + if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::PedersenHash::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall BlackBoxFuncCall::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::PedersenHash BlackBoxFuncCall::PedersenHash::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value 
= serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1793,40 +2262,42 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::PedersenHash &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.domain_separator, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::BlackBoxFuncCall obj; - obj.value = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); +Circuit::BlackBoxFuncCall::PedersenHash serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::PedersenHash obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.domain_separator = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::AND &lhs, const BlackBoxFuncCall::AND &rhs) { - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } + inline bool operator==(const BlackBoxFuncCall::EcdsaSecp256k1 &lhs, const BlackBoxFuncCall::EcdsaSecp256k1 &rhs) { + if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } + if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } + if (!(lhs.signature == rhs.signature)) { return false; } + if (!(lhs.hashed_message == rhs.hashed_message)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::AND::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::EcdsaSecp256k1::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::AND BlackBoxFuncCall::AND::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::EcdsaSecp256k1 BlackBoxFuncCall::EcdsaSecp256k1::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1837,40 +2308,46 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::AND &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::EcdsaSecp256k1 &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.public_key_x, serializer); + serde::Serializable::serialize(obj.public_key_y, 
serializer); + serde::Serializable::serialize(obj.signature, serializer); + serde::Serializable::serialize(obj.hashed_message, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::AND serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::AND obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::EcdsaSecp256k1 serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::EcdsaSecp256k1 obj; + obj.public_key_x = serde::Deserializable::deserialize(deserializer); + obj.public_key_y = serde::Deserializable::deserialize(deserializer); + obj.signature = serde::Deserializable::deserialize(deserializer); + obj.hashed_message = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::XOR &lhs, const BlackBoxFuncCall::XOR &rhs) { - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } + inline bool operator==(const BlackBoxFuncCall::EcdsaSecp256r1 &lhs, const BlackBoxFuncCall::EcdsaSecp256r1 &rhs) { + if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } + if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } + if (!(lhs.signature == rhs.signature)) { return false; } + if (!(lhs.hashed_message == rhs.hashed_message)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::XOR::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::EcdsaSecp256r1::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::XOR BlackBoxFuncCall::XOR::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::EcdsaSecp256r1 BlackBoxFuncCall::EcdsaSecp256r1::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1881,38 +2358,44 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::XOR &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::EcdsaSecp256r1 &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.public_key_x, serializer); + serde::Serializable::serialize(obj.public_key_y, serializer); + serde::Serializable::serialize(obj.signature, serializer); + serde::Serializable::serialize(obj.hashed_message, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::XOR serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::XOR obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::EcdsaSecp256r1 
serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::EcdsaSecp256r1 obj; + obj.public_key_x = serde::Deserializable::deserialize(deserializer); + obj.public_key_y = serde::Deserializable::deserialize(deserializer); + obj.signature = serde::Deserializable::deserialize(deserializer); + obj.hashed_message = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::RANGE &lhs, const BlackBoxFuncCall::RANGE &rhs) { - if (!(lhs.input == rhs.input)) { return false; } + inline bool operator==(const BlackBoxFuncCall::FixedBaseScalarMul &lhs, const BlackBoxFuncCall::FixedBaseScalarMul &rhs) { + if (!(lhs.low == rhs.low)) { return false; } + if (!(lhs.high == rhs.high)) { return false; } + if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::RANGE::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::FixedBaseScalarMul::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::RANGE BlackBoxFuncCall::RANGE::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::FixedBaseScalarMul BlackBoxFuncCall::FixedBaseScalarMul::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1923,35 +2406,42 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::RANGE &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.input, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::FixedBaseScalarMul &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.low, serializer); + serde::Serializable::serialize(obj.high, serializer); + serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Circuit::BlackBoxFuncCall::RANGE serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::RANGE obj; - obj.input = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::FixedBaseScalarMul serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::FixedBaseScalarMul obj; + obj.low = serde::Deserializable::deserialize(deserializer); + obj.high = serde::Deserializable::deserialize(deserializer); + obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::SHA256 &lhs, const BlackBoxFuncCall::SHA256 &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } + inline bool operator==(const BlackBoxFuncCall::EmbeddedCurveAdd &lhs, const BlackBoxFuncCall::EmbeddedCurveAdd &rhs) { + if (!(lhs.input1_x == rhs.input1_x)) { return false; } + if (!(lhs.input1_y == rhs.input1_y)) { return false; } + if (!(lhs.input2_x == rhs.input2_x)) { return false; } + if (!(lhs.input2_y == rhs.input2_y)) { return false; } if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } - inline std::vector 
BlackBoxFuncCall::SHA256::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::EmbeddedCurveAdd::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::SHA256 BlackBoxFuncCall::SHA256::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::EmbeddedCurveAdd BlackBoxFuncCall::EmbeddedCurveAdd::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -1962,37 +2452,43 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::SHA256 &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::EmbeddedCurveAdd &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.input1_x, serializer); + serde::Serializable::serialize(obj.input1_y, serializer); + serde::Serializable::serialize(obj.input2_x, serializer); + serde::Serializable::serialize(obj.input2_y, serializer); serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Circuit::BlackBoxFuncCall::SHA256 serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::SHA256 obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::EmbeddedCurveAdd serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::EmbeddedCurveAdd obj; + obj.input1_x = serde::Deserializable::deserialize(deserializer); + obj.input1_y = serde::Deserializable::deserialize(deserializer); + obj.input2_x = serde::Deserializable::deserialize(deserializer); + obj.input2_y = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::Blake2s &lhs, const BlackBoxFuncCall::Blake2s &rhs) { + inline bool operator==(const BlackBoxFuncCall::Keccak256 &lhs, const BlackBoxFuncCall::Keccak256 &rhs) { if (!(lhs.inputs == rhs.inputs)) { return false; } if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::Blake2s::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::Keccak256::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::Blake2s BlackBoxFuncCall::Blake2s::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::Keccak256 BlackBoxFuncCall::Keccak256::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2003,15 +2499,15 @@ namespace Circuit { template <> template -void 
serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Blake2s &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Keccak256 &obj, Serializer &serializer) { serde::Serializable::serialize(obj.inputs, serializer); serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Circuit::BlackBoxFuncCall::Blake2s serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::Blake2s obj; +Circuit::BlackBoxFuncCall::Keccak256 serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::Keccak256 obj; obj.inputs = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; @@ -2019,21 +2515,22 @@ Circuit::BlackBoxFuncCall::Blake2s serde::Deserializable BlackBoxFuncCall::Blake3::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::Keccak256VariableLength::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::Blake3 BlackBoxFuncCall::Blake3::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::Keccak256VariableLength BlackBoxFuncCall::Keccak256VariableLength::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2044,40 +2541,39 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Blake3 &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Keccak256VariableLength &obj, Serializer &serializer) { serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.var_message_size, serializer); serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Circuit::BlackBoxFuncCall::Blake3 serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::Blake3 obj; +Circuit::BlackBoxFuncCall::Keccak256VariableLength serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::Keccak256VariableLength obj; obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.var_message_size = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::SchnorrVerify &lhs, const BlackBoxFuncCall::SchnorrVerify &rhs) { - if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } - if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } - if (!(lhs.signature == rhs.signature)) { return false; } - if (!(lhs.message == rhs.message)) { return false; } - if (!(lhs.output == rhs.output)) { return false; } + inline bool operator==(const BlackBoxFuncCall::Keccakf1600 &lhs, const BlackBoxFuncCall::Keccakf1600 &rhs) { + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::SchnorrVerify::bincodeSerialize() const { + inline std::vector 
BlackBoxFuncCall::Keccakf1600::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::SchnorrVerify BlackBoxFuncCall::SchnorrVerify::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::Keccakf1600 BlackBoxFuncCall::Keccakf1600::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2088,44 +2584,39 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::SchnorrVerify &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.public_key_x, serializer); - serde::Serializable::serialize(obj.public_key_y, serializer); - serde::Serializable::serialize(obj.signature, serializer); - serde::Serializable::serialize(obj.message, serializer); - serde::Serializable::serialize(obj.output, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Keccakf1600 &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Circuit::BlackBoxFuncCall::SchnorrVerify serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::SchnorrVerify obj; - obj.public_key_x = serde::Deserializable::deserialize(deserializer); - obj.public_key_y = serde::Deserializable::deserialize(deserializer); - obj.signature = serde::Deserializable::deserialize(deserializer); - obj.message = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::Keccakf1600 serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::Keccakf1600 obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::PedersenCommitment &lhs, const BlackBoxFuncCall::PedersenCommitment &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.domain_separator == rhs.domain_separator)) { return false; } - if (!(lhs.outputs == rhs.outputs)) { return false; } + inline bool operator==(const BlackBoxFuncCall::RecursiveAggregation &lhs, const BlackBoxFuncCall::RecursiveAggregation &rhs) { + if (!(lhs.verification_key == rhs.verification_key)) { return false; } + if (!(lhs.proof == rhs.proof)) { return false; } + if (!(lhs.public_inputs == rhs.public_inputs)) { return false; } + if (!(lhs.key_hash == rhs.key_hash)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::PedersenCommitment::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::RecursiveAggregation::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::PedersenCommitment BlackBoxFuncCall::PedersenCommitment::bincodeDeserialize(std::vector input) { + inline 
BlackBoxFuncCall::RecursiveAggregation BlackBoxFuncCall::RecursiveAggregation::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2136,40 +2627,42 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::PedersenCommitment &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.domain_separator, serializer); - serde::Serializable::serialize(obj.outputs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::RecursiveAggregation &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.verification_key, serializer); + serde::Serializable::serialize(obj.proof, serializer); + serde::Serializable::serialize(obj.public_inputs, serializer); + serde::Serializable::serialize(obj.key_hash, serializer); } template <> template -Circuit::BlackBoxFuncCall::PedersenCommitment serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::PedersenCommitment obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.domain_separator = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::RecursiveAggregation serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::RecursiveAggregation obj; + obj.verification_key = serde::Deserializable::deserialize(deserializer); + obj.proof = serde::Deserializable::deserialize(deserializer); + obj.public_inputs = serde::Deserializable::deserialize(deserializer); + obj.key_hash = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::PedersenHash &lhs, const BlackBoxFuncCall::PedersenHash &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.domain_separator == rhs.domain_separator)) { return false; } + inline bool operator==(const BlackBoxFuncCall::BigIntAdd &lhs, const BlackBoxFuncCall::BigIntAdd &rhs) { + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::PedersenHash::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::BigIntAdd::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::PedersenHash BlackBoxFuncCall::PedersenHash::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::BigIntAdd BlackBoxFuncCall::BigIntAdd::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2180,42 +2673,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const 
Circuit::BlackBoxFuncCall::PedersenHash &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.domain_separator, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntAdd &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::PedersenHash serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::PedersenHash obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.domain_separator = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::BigIntAdd serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::BigIntAdd obj; + obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::EcdsaSecp256k1 &lhs, const BlackBoxFuncCall::EcdsaSecp256k1 &rhs) { - if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } - if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } - if (!(lhs.signature == rhs.signature)) { return false; } - if (!(lhs.hashed_message == rhs.hashed_message)) { return false; } + inline bool operator==(const BlackBoxFuncCall::BigIntSub &lhs, const BlackBoxFuncCall::BigIntSub &rhs) { + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::EcdsaSecp256k1::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::BigIntSub::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::EcdsaSecp256k1 BlackBoxFuncCall::EcdsaSecp256k1::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::BigIntSub BlackBoxFuncCall::BigIntSub::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2226,46 +2717,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::EcdsaSecp256k1 &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.public_key_x, serializer); - serde::Serializable::serialize(obj.public_key_y, serializer); - serde::Serializable::serialize(obj.signature, serializer); - serde::Serializable::serialize(obj.hashed_message, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntSub &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::EcdsaSecp256k1 serde::Deserializable::deserialize(Deserializer &deserializer) { - 
Circuit::BlackBoxFuncCall::EcdsaSecp256k1 obj; - obj.public_key_x = serde::Deserializable::deserialize(deserializer); - obj.public_key_y = serde::Deserializable::deserialize(deserializer); - obj.signature = serde::Deserializable::deserialize(deserializer); - obj.hashed_message = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::BigIntSub serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::BigIntSub obj; + obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::EcdsaSecp256r1 &lhs, const BlackBoxFuncCall::EcdsaSecp256r1 &rhs) { - if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } - if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } - if (!(lhs.signature == rhs.signature)) { return false; } - if (!(lhs.hashed_message == rhs.hashed_message)) { return false; } + inline bool operator==(const BlackBoxFuncCall::BigIntMul &lhs, const BlackBoxFuncCall::BigIntMul &rhs) { + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::EcdsaSecp256r1::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::BigIntMul::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::EcdsaSecp256r1 BlackBoxFuncCall::EcdsaSecp256r1::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::BigIntMul BlackBoxFuncCall::BigIntMul::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2276,44 +2761,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::EcdsaSecp256r1 &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.public_key_x, serializer); - serde::Serializable::serialize(obj.public_key_y, serializer); - serde::Serializable::serialize(obj.signature, serializer); - serde::Serializable::serialize(obj.hashed_message, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntMul &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::EcdsaSecp256r1 serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::EcdsaSecp256r1 obj; - obj.public_key_x = serde::Deserializable::deserialize(deserializer); - obj.public_key_y = serde::Deserializable::deserialize(deserializer); - obj.signature = serde::Deserializable::deserialize(deserializer); - obj.hashed_message = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::BigIntMul serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::BigIntMul obj; + 
obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::FixedBaseScalarMul &lhs, const BlackBoxFuncCall::FixedBaseScalarMul &rhs) { - if (!(lhs.low == rhs.low)) { return false; } - if (!(lhs.high == rhs.high)) { return false; } - if (!(lhs.outputs == rhs.outputs)) { return false; } + inline bool operator==(const BlackBoxFuncCall::BigIntDiv &lhs, const BlackBoxFuncCall::BigIntDiv &rhs) { + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } + if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::FixedBaseScalarMul::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::BigIntDiv::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::FixedBaseScalarMul BlackBoxFuncCall::FixedBaseScalarMul::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::BigIntDiv BlackBoxFuncCall::BigIntDiv::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2324,42 +2805,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::FixedBaseScalarMul &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.low, serializer); - serde::Serializable::serialize(obj.high, serializer); - serde::Serializable::serialize(obj.outputs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntDiv &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::FixedBaseScalarMul serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::FixedBaseScalarMul obj; - obj.low = serde::Deserializable::deserialize(deserializer); - obj.high = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::BigIntDiv serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::BigIntDiv obj; + obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::EmbeddedCurveAdd &lhs, const BlackBoxFuncCall::EmbeddedCurveAdd &rhs) { - if (!(lhs.input1_x == rhs.input1_x)) { return false; } - if (!(lhs.input1_y == rhs.input1_y)) { return false; } - if (!(lhs.input2_x == rhs.input2_x)) { return false; } - if (!(lhs.input2_y == rhs.input2_y)) { return false; } - if (!(lhs.outputs == rhs.outputs)) { return false; } + inline bool operator==(const BlackBoxFuncCall::BigIntFromLeBytes &lhs, const 
BlackBoxFuncCall::BigIntFromLeBytes &rhs) { + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.modulus == rhs.modulus)) { return false; } + if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::EmbeddedCurveAdd::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::BigIntFromLeBytes::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::EmbeddedCurveAdd BlackBoxFuncCall::EmbeddedCurveAdd::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::BigIntFromLeBytes BlackBoxFuncCall::BigIntFromLeBytes::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2370,43 +2849,39 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::EmbeddedCurveAdd &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.input1_x, serializer); - serde::Serializable::serialize(obj.input1_y, serializer); - serde::Serializable::serialize(obj.input2_x, serializer); - serde::Serializable::serialize(obj.input2_y, serializer); - serde::Serializable::serialize(obj.outputs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntFromLeBytes &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.modulus, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::EmbeddedCurveAdd serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::EmbeddedCurveAdd obj; - obj.input1_x = serde::Deserializable::deserialize(deserializer); - obj.input1_y = serde::Deserializable::deserialize(deserializer); - obj.input2_x = serde::Deserializable::deserialize(deserializer); - obj.input2_y = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::BigIntFromLeBytes serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::BigIntFromLeBytes obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.modulus = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::Keccak256 &lhs, const BlackBoxFuncCall::Keccak256 &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } + inline bool operator==(const BlackBoxFuncCall::BigIntToLeBytes &lhs, const BlackBoxFuncCall::BigIntToLeBytes &rhs) { + if (!(lhs.input == rhs.input)) { return false; } if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::Keccak256::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::BigIntToLeBytes::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return 
std::move(serializer).bytes(); } - inline BlackBoxFuncCall::Keccak256 BlackBoxFuncCall::Keccak256::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::BigIntToLeBytes BlackBoxFuncCall::BigIntToLeBytes::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2417,38 +2892,38 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Keccak256 &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntToLeBytes &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.input, serializer); serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Circuit::BlackBoxFuncCall::Keccak256 serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::Keccak256 obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxFuncCall::BigIntToLeBytes serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::BigIntToLeBytes obj; + obj.input = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::Keccak256VariableLength &lhs, const BlackBoxFuncCall::Keccak256VariableLength &rhs) { + inline bool operator==(const BlackBoxFuncCall::Poseidon2Permutation &lhs, const BlackBoxFuncCall::Poseidon2Permutation &rhs) { if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.var_message_size == rhs.var_message_size)) { return false; } if (!(lhs.outputs == rhs.outputs)) { return false; } + if (!(lhs.len == rhs.len)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::Keccak256VariableLength::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::Poseidon2Permutation::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::Keccak256VariableLength BlackBoxFuncCall::Keccak256VariableLength::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::Poseidon2Permutation BlackBoxFuncCall::Poseidon2Permutation::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2459,39 +2934,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Keccak256VariableLength &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Poseidon2Permutation &obj, Serializer &serializer) { serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.var_message_size, serializer); serde::Serializable::serialize(obj.outputs, serializer); + 
serde::Serializable::serialize(obj.len, serializer); } template <> template -Circuit::BlackBoxFuncCall::Keccak256VariableLength serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::Keccak256VariableLength obj; +Circuit::BlackBoxFuncCall::Poseidon2Permutation serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::Poseidon2Permutation obj; obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.var_message_size = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); + obj.len = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::Keccakf1600 &lhs, const BlackBoxFuncCall::Keccakf1600 &rhs) { + inline bool operator==(const BlackBoxFuncCall::Sha256Compression &lhs, const BlackBoxFuncCall::Sha256Compression &rhs) { if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.hash_values == rhs.hash_values)) { return false; } if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::Keccakf1600::bincodeSerialize() const { + inline std::vector BlackBoxFuncCall::Sha256Compression::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::Keccakf1600 BlackBoxFuncCall::Keccakf1600::bincodeDeserialize(std::vector input) { + inline BlackBoxFuncCall::Sha256Compression BlackBoxFuncCall::Sha256Compression::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2502,39 +2978,38 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Keccakf1600 &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Sha256Compression &obj, Serializer &serializer) { serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.hash_values, serializer); serde::Serializable::serialize(obj.outputs, serializer); } template <> template -Circuit::BlackBoxFuncCall::Keccakf1600 serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::Keccakf1600 obj; +Circuit::BlackBoxFuncCall::Sha256Compression serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxFuncCall::Sha256Compression obj; obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.hash_values = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::RecursiveAggregation &lhs, const BlackBoxFuncCall::RecursiveAggregation &rhs) { - if (!(lhs.verification_key == rhs.verification_key)) { return false; } - if (!(lhs.proof == rhs.proof)) { return false; } - if (!(lhs.public_inputs == rhs.public_inputs)) { return false; } - if (!(lhs.key_hash == rhs.key_hash)) { return false; } + inline bool operator==(const BlackBoxOp &lhs, const BlackBoxOp &rhs) { + if (!(lhs.value == 
rhs.value)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::RecursiveAggregation::bincodeSerialize() const { + inline std::vector BlackBoxOp::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::RecursiveAggregation BlackBoxFuncCall::RecursiveAggregation::bincodeDeserialize(std::vector input) { + inline BlackBoxOp BlackBoxOp::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2545,42 +3020,39 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::RecursiveAggregation &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.verification_key, serializer); - serde::Serializable::serialize(obj.proof, serializer); - serde::Serializable::serialize(obj.public_inputs, serializer); - serde::Serializable::serialize(obj.key_hash, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::BlackBoxFuncCall::RecursiveAggregation serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::RecursiveAggregation obj; - obj.verification_key = serde::Deserializable::deserialize(deserializer); - obj.proof = serde::Deserializable::deserialize(deserializer); - obj.public_inputs = serde::Deserializable::deserialize(deserializer); - obj.key_hash = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::BlackBoxOp obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::BigIntAdd &lhs, const BlackBoxFuncCall::BigIntAdd &rhs) { - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } + inline bool operator==(const BlackBoxOp::Sha256 &lhs, const BlackBoxOp::Sha256 &rhs) { + if (!(lhs.message == rhs.message)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::BigIntAdd::bincodeSerialize() const { + inline std::vector BlackBoxOp::Sha256::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::BigIntAdd BlackBoxFuncCall::BigIntAdd::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::Sha256 BlackBoxOp::Sha256::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw 
serde::deserialization_error("Some input bytes were not read"); } @@ -2591,40 +3063,37 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntAdd &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::Sha256 &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.message, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::BigIntAdd serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::BigIntAdd obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::Sha256 serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::Sha256 obj; + obj.message = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::BigIntNeg &lhs, const BlackBoxFuncCall::BigIntNeg &rhs) { - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } + inline bool operator==(const BlackBoxOp::Blake2s &lhs, const BlackBoxOp::Blake2s &rhs) { + if (!(lhs.message == rhs.message)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::BigIntNeg::bincodeSerialize() const { + inline std::vector BlackBoxOp::Blake2s::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::BigIntNeg BlackBoxFuncCall::BigIntNeg::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::Blake2s BlackBoxOp::Blake2s::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2635,40 +3104,37 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntNeg &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::Blake2s &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.message, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::BigIntNeg serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::BigIntNeg obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::Blake2s serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::Blake2s obj; + obj.message = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const 
BlackBoxFuncCall::BigIntMul &lhs, const BlackBoxFuncCall::BigIntMul &rhs) { - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } + inline bool operator==(const BlackBoxOp::Blake3 &lhs, const BlackBoxOp::Blake3 &rhs) { + if (!(lhs.message == rhs.message)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::BigIntMul::bincodeSerialize() const { + inline std::vector BlackBoxOp::Blake3::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::BigIntMul BlackBoxFuncCall::BigIntMul::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::Blake3 BlackBoxOp::Blake3::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2679,40 +3145,37 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntMul &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::Blake3 &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.message, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::BigIntMul serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::BigIntMul obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::Blake3 serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::Blake3 obj; + obj.message = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::BigIntDiv &lhs, const BlackBoxFuncCall::BigIntDiv &rhs) { - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } + inline bool operator==(const BlackBoxOp::Keccak256 &lhs, const BlackBoxOp::Keccak256 &rhs) { + if (!(lhs.message == rhs.message)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::BigIntDiv::bincodeSerialize() const { + inline std::vector BlackBoxOp::Keccak256::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::BigIntDiv BlackBoxFuncCall::BigIntDiv::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::Keccak256 BlackBoxOp::Keccak256::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some 
input bytes were not read"); } @@ -2723,40 +3186,37 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntDiv &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::Keccak256 &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.message, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::BigIntDiv serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::BigIntDiv obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::Keccak256 serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::Keccak256 obj; + obj.message = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::BigIntFromLeBytes &lhs, const BlackBoxFuncCall::BigIntFromLeBytes &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.modulus == rhs.modulus)) { return false; } + inline bool operator==(const BlackBoxOp::Keccakf1600 &lhs, const BlackBoxOp::Keccakf1600 &rhs) { + if (!(lhs.message == rhs.message)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::BigIntFromLeBytes::bincodeSerialize() const { + inline std::vector BlackBoxOp::Keccakf1600::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::BigIntFromLeBytes BlackBoxFuncCall::BigIntFromLeBytes::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::Keccakf1600 BlackBoxOp::Keccakf1600::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2767,39 +3227,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntFromLeBytes &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.modulus, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::Keccakf1600 &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.message, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxFuncCall::BigIntFromLeBytes serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::BigIntFromLeBytes obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.modulus = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::Keccakf1600 serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::Keccakf1600 obj; + obj.message = serde::Deserializable::deserialize(deserializer); obj.output = 
serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::BigIntToLeBytes &lhs, const BlackBoxFuncCall::BigIntToLeBytes &rhs) { - if (!(lhs.input == rhs.input)) { return false; } - if (!(lhs.outputs == rhs.outputs)) { return false; } + inline bool operator==(const BlackBoxOp::EcdsaSecp256k1 &lhs, const BlackBoxOp::EcdsaSecp256k1 &rhs) { + if (!(lhs.hashed_msg == rhs.hashed_msg)) { return false; } + if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } + if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } + if (!(lhs.signature == rhs.signature)) { return false; } + if (!(lhs.result == rhs.result)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::BigIntToLeBytes::bincodeSerialize() const { + inline std::vector BlackBoxOp::EcdsaSecp256k1::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::BigIntToLeBytes BlackBoxFuncCall::BigIntToLeBytes::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::EcdsaSecp256k1 BlackBoxOp::EcdsaSecp256k1::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2810,38 +3271,46 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::BigIntToLeBytes &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.input, serializer); - serde::Serializable::serialize(obj.outputs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::EcdsaSecp256k1 &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.hashed_msg, serializer); + serde::Serializable::serialize(obj.public_key_x, serializer); + serde::Serializable::serialize(obj.public_key_y, serializer); + serde::Serializable::serialize(obj.signature, serializer); + serde::Serializable::serialize(obj.result, serializer); } template <> template -Circuit::BlackBoxFuncCall::BigIntToLeBytes serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::BigIntToLeBytes obj; - obj.input = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::EcdsaSecp256k1 serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::EcdsaSecp256k1 obj; + obj.hashed_msg = serde::Deserializable::deserialize(deserializer); + obj.public_key_x = serde::Deserializable::deserialize(deserializer); + obj.public_key_y = serde::Deserializable::deserialize(deserializer); + obj.signature = serde::Deserializable::deserialize(deserializer); + obj.result = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::Poseidon2Permutation &lhs, const BlackBoxFuncCall::Poseidon2Permutation &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.outputs == rhs.outputs)) { return false; } - if (!(lhs.len == rhs.len)) { return false; } + inline bool operator==(const BlackBoxOp::EcdsaSecp256r1 &lhs, const BlackBoxOp::EcdsaSecp256r1 
&rhs) { + if (!(lhs.hashed_msg == rhs.hashed_msg)) { return false; } + if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } + if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } + if (!(lhs.signature == rhs.signature)) { return false; } + if (!(lhs.result == rhs.result)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::Poseidon2Permutation::bincodeSerialize() const { + inline std::vector BlackBoxOp::EcdsaSecp256r1::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::Poseidon2Permutation BlackBoxFuncCall::Poseidon2Permutation::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::EcdsaSecp256r1 BlackBoxOp::EcdsaSecp256r1::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2852,40 +3321,46 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Poseidon2Permutation &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.outputs, serializer); - serde::Serializable::serialize(obj.len, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::EcdsaSecp256r1 &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.hashed_msg, serializer); + serde::Serializable::serialize(obj.public_key_x, serializer); + serde::Serializable::serialize(obj.public_key_y, serializer); + serde::Serializable::serialize(obj.signature, serializer); + serde::Serializable::serialize(obj.result, serializer); } template <> template -Circuit::BlackBoxFuncCall::Poseidon2Permutation serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::Poseidon2Permutation obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); - obj.len = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::EcdsaSecp256r1 serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::EcdsaSecp256r1 obj; + obj.hashed_msg = serde::Deserializable::deserialize(deserializer); + obj.public_key_x = serde::Deserializable::deserialize(deserializer); + obj.public_key_y = serde::Deserializable::deserialize(deserializer); + obj.signature = serde::Deserializable::deserialize(deserializer); + obj.result = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxFuncCall::Sha256Compression &lhs, const BlackBoxFuncCall::Sha256Compression &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.hash_values == rhs.hash_values)) { return false; } - if (!(lhs.outputs == rhs.outputs)) { return false; } + inline bool operator==(const BlackBoxOp::SchnorrVerify &lhs, const BlackBoxOp::SchnorrVerify &rhs) { + if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } + if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } + if (!(lhs.message == rhs.message)) { return false; } + if (!(lhs.signature == rhs.signature)) { return false; } 
+ if (!(lhs.result == rhs.result)) { return false; } return true; } - inline std::vector BlackBoxFuncCall::Sha256Compression::bincodeSerialize() const { + inline std::vector BlackBoxOp::SchnorrVerify::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxFuncCall::Sha256Compression BlackBoxFuncCall::Sha256Compression::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::SchnorrVerify BlackBoxOp::SchnorrVerify::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2896,38 +3371,44 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxFuncCall::Sha256Compression &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.hash_values, serializer); - serde::Serializable::serialize(obj.outputs, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::SchnorrVerify &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.public_key_x, serializer); + serde::Serializable::serialize(obj.public_key_y, serializer); + serde::Serializable::serialize(obj.message, serializer); + serde::Serializable::serialize(obj.signature, serializer); + serde::Serializable::serialize(obj.result, serializer); } template <> template -Circuit::BlackBoxFuncCall::Sha256Compression serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxFuncCall::Sha256Compression obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.hash_values = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::SchnorrVerify serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::SchnorrVerify obj; + obj.public_key_x = serde::Deserializable::deserialize(deserializer); + obj.public_key_y = serde::Deserializable::deserialize(deserializer); + obj.message = serde::Deserializable::deserialize(deserializer); + obj.signature = serde::Deserializable::deserialize(deserializer); + obj.result = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp &lhs, const BlackBoxOp &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const BlackBoxOp::PedersenCommitment &lhs, const BlackBoxOp::PedersenCommitment &rhs) { + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.domain_separator == rhs.domain_separator)) { return false; } + if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxOp::bincodeSerialize() const { + inline std::vector BlackBoxOp::PedersenCommitment::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp BlackBoxOp::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::PedersenCommitment 
BlackBoxOp::PedersenCommitment::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2938,39 +3419,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::PedersenCommitment &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.domain_separator, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxOp serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::BlackBoxOp obj; - obj.value = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); +Circuit::BlackBoxOp::PedersenCommitment serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::PedersenCommitment obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.domain_separator = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::Sha256 &lhs, const BlackBoxOp::Sha256 &rhs) { - if (!(lhs.message == rhs.message)) { return false; } + inline bool operator==(const BlackBoxOp::PedersenHash &lhs, const BlackBoxOp::PedersenHash &rhs) { + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.domain_separator == rhs.domain_separator)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxOp::Sha256::bincodeSerialize() const { + inline std::vector BlackBoxOp::PedersenHash::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::Sha256 BlackBoxOp::Sha256::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::PedersenHash BlackBoxOp::PedersenHash::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -2981,37 +3463,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::Sha256 &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.message, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::PedersenHash &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.domain_separator, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxOp::Sha256 serde::Deserializable::deserialize(Deserializer 
&deserializer) { - Circuit::BlackBoxOp::Sha256 obj; - obj.message = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::PedersenHash serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::PedersenHash obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.domain_separator = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::Blake2s &lhs, const BlackBoxOp::Blake2s &rhs) { - if (!(lhs.message == rhs.message)) { return false; } - if (!(lhs.output == rhs.output)) { return false; } + inline bool operator==(const BlackBoxOp::FixedBaseScalarMul &lhs, const BlackBoxOp::FixedBaseScalarMul &rhs) { + if (!(lhs.low == rhs.low)) { return false; } + if (!(lhs.high == rhs.high)) { return false; } + if (!(lhs.result == rhs.result)) { return false; } return true; } - inline std::vector BlackBoxOp::Blake2s::bincodeSerialize() const { + inline std::vector BlackBoxOp::FixedBaseScalarMul::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::Blake2s BlackBoxOp::Blake2s::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::FixedBaseScalarMul BlackBoxOp::FixedBaseScalarMul::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3022,37 +3507,42 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::Blake2s &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.message, serializer); - serde::Serializable::serialize(obj.output, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::FixedBaseScalarMul &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.low, serializer); + serde::Serializable::serialize(obj.high, serializer); + serde::Serializable::serialize(obj.result, serializer); } template <> template -Circuit::BlackBoxOp::Blake2s serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::Blake2s obj; - obj.message = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::FixedBaseScalarMul serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::FixedBaseScalarMul obj; + obj.low = serde::Deserializable::deserialize(deserializer); + obj.high = serde::Deserializable::deserialize(deserializer); + obj.result = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::Blake3 &lhs, const BlackBoxOp::Blake3 &rhs) { - if (!(lhs.message == rhs.message)) { return false; } - if (!(lhs.output == rhs.output)) { return false; } + inline bool operator==(const BlackBoxOp::EmbeddedCurveAdd &lhs, const BlackBoxOp::EmbeddedCurveAdd &rhs) { + if (!(lhs.input1_x == rhs.input1_x)) { return false; } + if (!(lhs.input1_y == rhs.input1_y)) { return false; } + if (!(lhs.input2_x == rhs.input2_x)) { return false; } + if 
(!(lhs.input2_y == rhs.input2_y)) { return false; } + if (!(lhs.result == rhs.result)) { return false; } return true; } - inline std::vector BlackBoxOp::Blake3::bincodeSerialize() const { + inline std::vector BlackBoxOp::EmbeddedCurveAdd::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::Blake3 BlackBoxOp::Blake3::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::EmbeddedCurveAdd BlackBoxOp::EmbeddedCurveAdd::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3063,37 +3553,44 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::Blake3 &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.message, serializer); - serde::Serializable::serialize(obj.output, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::EmbeddedCurveAdd &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.input1_x, serializer); + serde::Serializable::serialize(obj.input1_y, serializer); + serde::Serializable::serialize(obj.input2_x, serializer); + serde::Serializable::serialize(obj.input2_y, serializer); + serde::Serializable::serialize(obj.result, serializer); } template <> template -Circuit::BlackBoxOp::Blake3 serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::Blake3 obj; - obj.message = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::EmbeddedCurveAdd serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::EmbeddedCurveAdd obj; + obj.input1_x = serde::Deserializable::deserialize(deserializer); + obj.input1_y = serde::Deserializable::deserialize(deserializer); + obj.input2_x = serde::Deserializable::deserialize(deserializer); + obj.input2_y = serde::Deserializable::deserialize(deserializer); + obj.result = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::Keccak256 &lhs, const BlackBoxOp::Keccak256 &rhs) { - if (!(lhs.message == rhs.message)) { return false; } + inline bool operator==(const BlackBoxOp::BigIntAdd &lhs, const BlackBoxOp::BigIntAdd &rhs) { + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxOp::Keccak256::bincodeSerialize() const { + inline std::vector BlackBoxOp::BigIntAdd::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::Keccak256 BlackBoxOp::Keccak256::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::BigIntAdd BlackBoxOp::BigIntAdd::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = 
serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3104,37 +3601,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::Keccak256 &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.message, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntAdd &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxOp::Keccak256 serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::Keccak256 obj; - obj.message = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::BigIntAdd serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::BigIntAdd obj; + obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::Keccakf1600 &lhs, const BlackBoxOp::Keccakf1600 &rhs) { - if (!(lhs.message == rhs.message)) { return false; } + inline bool operator==(const BlackBoxOp::BigIntSub &lhs, const BlackBoxOp::BigIntSub &rhs) { + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxOp::Keccakf1600::bincodeSerialize() const { + inline std::vector BlackBoxOp::BigIntSub::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::Keccakf1600 BlackBoxOp::Keccakf1600::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::BigIntSub BlackBoxOp::BigIntSub::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3145,40 +3645,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::Keccakf1600 &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.message, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntSub &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxOp::Keccakf1600 serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::Keccakf1600 obj; - obj.message = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::BigIntSub serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::BigIntSub obj; + obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); obj.output = 
serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::EcdsaSecp256k1 &lhs, const BlackBoxOp::EcdsaSecp256k1 &rhs) { - if (!(lhs.hashed_msg == rhs.hashed_msg)) { return false; } - if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } - if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } - if (!(lhs.signature == rhs.signature)) { return false; } - if (!(lhs.result == rhs.result)) { return false; } + inline bool operator==(const BlackBoxOp::BigIntMul &lhs, const BlackBoxOp::BigIntMul &rhs) { + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } + if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxOp::EcdsaSecp256k1::bincodeSerialize() const { + inline std::vector BlackBoxOp::BigIntMul::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::EcdsaSecp256k1 BlackBoxOp::EcdsaSecp256k1::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::BigIntMul BlackBoxOp::BigIntMul::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3189,46 +3689,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::EcdsaSecp256k1 &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.hashed_msg, serializer); - serde::Serializable::serialize(obj.public_key_x, serializer); - serde::Serializable::serialize(obj.public_key_y, serializer); - serde::Serializable::serialize(obj.signature, serializer); - serde::Serializable::serialize(obj.result, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntMul &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxOp::EcdsaSecp256k1 serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::EcdsaSecp256k1 obj; - obj.hashed_msg = serde::Deserializable::deserialize(deserializer); - obj.public_key_x = serde::Deserializable::deserialize(deserializer); - obj.public_key_y = serde::Deserializable::deserialize(deserializer); - obj.signature = serde::Deserializable::deserialize(deserializer); - obj.result = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::BigIntMul serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::BigIntMul obj; + obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::EcdsaSecp256r1 &lhs, const BlackBoxOp::EcdsaSecp256r1 &rhs) { - if (!(lhs.hashed_msg == rhs.hashed_msg)) { return false; } - if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } - if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } - if (!(lhs.signature == 
rhs.signature)) { return false; } - if (!(lhs.result == rhs.result)) { return false; } + inline bool operator==(const BlackBoxOp::BigIntDiv &lhs, const BlackBoxOp::BigIntDiv &rhs) { + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } + if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxOp::EcdsaSecp256r1::bincodeSerialize() const { + inline std::vector BlackBoxOp::BigIntDiv::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::EcdsaSecp256r1 BlackBoxOp::EcdsaSecp256r1::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::BigIntDiv BlackBoxOp::BigIntDiv::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3239,46 +3733,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::EcdsaSecp256r1 &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.hashed_msg, serializer); - serde::Serializable::serialize(obj.public_key_x, serializer); - serde::Serializable::serialize(obj.public_key_y, serializer); - serde::Serializable::serialize(obj.signature, serializer); - serde::Serializable::serialize(obj.result, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntDiv &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxOp::EcdsaSecp256r1 serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::EcdsaSecp256r1 obj; - obj.hashed_msg = serde::Deserializable::deserialize(deserializer); - obj.public_key_x = serde::Deserializable::deserialize(deserializer); - obj.public_key_y = serde::Deserializable::deserialize(deserializer); - obj.signature = serde::Deserializable::deserialize(deserializer); - obj.result = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::BigIntDiv serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::BigIntDiv obj; + obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::SchnorrVerify &lhs, const BlackBoxOp::SchnorrVerify &rhs) { - if (!(lhs.public_key_x == rhs.public_key_x)) { return false; } - if (!(lhs.public_key_y == rhs.public_key_y)) { return false; } - if (!(lhs.message == rhs.message)) { return false; } - if (!(lhs.signature == rhs.signature)) { return false; } - if (!(lhs.result == rhs.result)) { return false; } + inline bool operator==(const BlackBoxOp::BigIntFromLeBytes &lhs, const BlackBoxOp::BigIntFromLeBytes &rhs) { + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.modulus == rhs.modulus)) { return false; } + if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector 
BlackBoxOp::SchnorrVerify::bincodeSerialize() const { + inline std::vector BlackBoxOp::BigIntFromLeBytes::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::SchnorrVerify BlackBoxOp::SchnorrVerify::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::BigIntFromLeBytes BlackBoxOp::BigIntFromLeBytes::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3289,44 +3777,39 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::SchnorrVerify &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.public_key_x, serializer); - serde::Serializable::serialize(obj.public_key_y, serializer); - serde::Serializable::serialize(obj.message, serializer); - serde::Serializable::serialize(obj.signature, serializer); - serde::Serializable::serialize(obj.result, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntFromLeBytes &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.modulus, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxOp::SchnorrVerify serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::SchnorrVerify obj; - obj.public_key_x = serde::Deserializable::deserialize(deserializer); - obj.public_key_y = serde::Deserializable::deserialize(deserializer); - obj.message = serde::Deserializable::deserialize(deserializer); - obj.signature = serde::Deserializable::deserialize(deserializer); - obj.result = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::BigIntFromLeBytes serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::BigIntFromLeBytes obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.modulus = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::PedersenCommitment &lhs, const BlackBoxOp::PedersenCommitment &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.domain_separator == rhs.domain_separator)) { return false; } + inline bool operator==(const BlackBoxOp::BigIntToLeBytes &lhs, const BlackBoxOp::BigIntToLeBytes &rhs) { + if (!(lhs.input == rhs.input)) { return false; } if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxOp::PedersenCommitment::bincodeSerialize() const { + inline std::vector BlackBoxOp::BigIntToLeBytes::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::PedersenCommitment BlackBoxOp::PedersenCommitment::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::BigIntToLeBytes BlackBoxOp::BigIntToLeBytes::bincodeDeserialize(std::vector input) { auto deserializer 
= serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3337,40 +3820,38 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::PedersenCommitment &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.domain_separator, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntToLeBytes &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.input, serializer); serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxOp::PedersenCommitment serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::PedersenCommitment obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.domain_separator = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::BigIntToLeBytes serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::BigIntToLeBytes obj; + obj.input = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::PedersenHash &lhs, const BlackBoxOp::PedersenHash &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.domain_separator == rhs.domain_separator)) { return false; } + inline bool operator==(const BlackBoxOp::Poseidon2Permutation &lhs, const BlackBoxOp::Poseidon2Permutation &rhs) { + if (!(lhs.message == rhs.message)) { return false; } if (!(lhs.output == rhs.output)) { return false; } + if (!(lhs.len == rhs.len)) { return false; } return true; } - inline std::vector BlackBoxOp::PedersenHash::bincodeSerialize() const { + inline std::vector BlackBoxOp::Poseidon2Permutation::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::PedersenHash BlackBoxOp::PedersenHash::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::Poseidon2Permutation BlackBoxOp::Poseidon2Permutation::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3381,40 +3862,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::PedersenHash &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.domain_separator, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::Poseidon2Permutation &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.message, serializer); serde::Serializable::serialize(obj.output, serializer); + serde::Serializable::serialize(obj.len, serializer); } template <> template -Circuit::BlackBoxOp::PedersenHash serde::Deserializable::deserialize(Deserializer &deserializer) { - 
Circuit::BlackBoxOp::PedersenHash obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.domain_separator = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::Poseidon2Permutation serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::Poseidon2Permutation obj; + obj.message = serde::Deserializable::deserialize(deserializer); obj.output = serde::Deserializable::deserialize(deserializer); + obj.len = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::FixedBaseScalarMul &lhs, const BlackBoxOp::FixedBaseScalarMul &rhs) { - if (!(lhs.low == rhs.low)) { return false; } - if (!(lhs.high == rhs.high)) { return false; } - if (!(lhs.result == rhs.result)) { return false; } + inline bool operator==(const BlackBoxOp::Sha256Compression &lhs, const BlackBoxOp::Sha256Compression &rhs) { + if (!(lhs.input == rhs.input)) { return false; } + if (!(lhs.hash_values == rhs.hash_values)) { return false; } + if (!(lhs.output == rhs.output)) { return false; } return true; } - inline std::vector BlackBoxOp::FixedBaseScalarMul::bincodeSerialize() const { + inline std::vector BlackBoxOp::Sha256Compression::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::FixedBaseScalarMul BlackBoxOp::FixedBaseScalarMul::bincodeDeserialize(std::vector input) { + inline BlackBoxOp::Sha256Compression BlackBoxOp::Sha256Compression::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3425,42 +3906,38 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::FixedBaseScalarMul &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.low, serializer); - serde::Serializable::serialize(obj.high, serializer); - serde::Serializable::serialize(obj.result, serializer); +void serde::Serializable::serialize(const Circuit::BlackBoxOp::Sha256Compression &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.input, serializer); + serde::Serializable::serialize(obj.hash_values, serializer); + serde::Serializable::serialize(obj.output, serializer); } template <> template -Circuit::BlackBoxOp::FixedBaseScalarMul serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::FixedBaseScalarMul obj; - obj.low = serde::Deserializable::deserialize(deserializer); - obj.high = serde::Deserializable::deserialize(deserializer); - obj.result = serde::Deserializable::deserialize(deserializer); +Circuit::BlackBoxOp::Sha256Compression serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BlackBoxOp::Sha256Compression obj; + obj.input = serde::Deserializable::deserialize(deserializer); + obj.hash_values = serde::Deserializable::deserialize(deserializer); + obj.output = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::EmbeddedCurveAdd &lhs, const BlackBoxOp::EmbeddedCurveAdd &rhs) { - if (!(lhs.input1_x == rhs.input1_x)) { return 
false; } - if (!(lhs.input1_y == rhs.input1_y)) { return false; } - if (!(lhs.input2_x == rhs.input2_x)) { return false; } - if (!(lhs.input2_y == rhs.input2_y)) { return false; } - if (!(lhs.result == rhs.result)) { return false; } + inline bool operator==(const BlockId &lhs, const BlockId &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BlackBoxOp::EmbeddedCurveAdd::bincodeSerialize() const { + inline std::vector BlockId::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::EmbeddedCurveAdd BlackBoxOp::EmbeddedCurveAdd::bincodeDeserialize(std::vector input) { + inline BlockId BlockId::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3471,44 +3948,41 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::EmbeddedCurveAdd &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.input1_x, serializer); - serde::Serializable::serialize(obj.input1_y, serializer); - serde::Serializable::serialize(obj.input2_x, serializer); - serde::Serializable::serialize(obj.input2_y, serializer); - serde::Serializable::serialize(obj.result, serializer); +void serde::Serializable::serialize(const Circuit::BlockId &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::BlackBoxOp::EmbeddedCurveAdd serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::EmbeddedCurveAdd obj; - obj.input1_x = serde::Deserializable::deserialize(deserializer); - obj.input1_y = serde::Deserializable::deserialize(deserializer); - obj.input2_x = serde::Deserializable::deserialize(deserializer); - obj.input2_y = serde::Deserializable::deserialize(deserializer); - obj.result = serde::Deserializable::deserialize(deserializer); +Circuit::BlockId serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::BlockId obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::BigIntAdd &lhs, const BlackBoxOp::BigIntAdd &rhs) { - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } - if (!(lhs.output == rhs.output)) { return false; } + inline bool operator==(const Brillig &lhs, const Brillig &rhs) { + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.outputs == rhs.outputs)) { return false; } + if (!(lhs.bytecode == rhs.bytecode)) { return false; } + if (!(lhs.predicate == rhs.predicate)) { return false; } return true; } - inline std::vector BlackBoxOp::BigIntAdd::bincodeSerialize() const { + inline std::vector Brillig::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - 
inline BlackBoxOp::BigIntAdd BlackBoxOp::BigIntAdd::bincodeDeserialize(std::vector input) { + inline Brillig Brillig::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3519,40 +3993,44 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntAdd &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); - serde::Serializable::serialize(obj.output, serializer); +void serde::Serializable::serialize(const Circuit::Brillig &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.outputs, serializer); + serde::Serializable::serialize(obj.bytecode, serializer); + serde::Serializable::serialize(obj.predicate, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::BlackBoxOp::BigIntAdd serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::BigIntAdd obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); +Circuit::Brillig serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::Brillig obj; + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.outputs = serde::Deserializable::deserialize(deserializer); + obj.bytecode = serde::Deserializable::deserialize(deserializer); + obj.predicate = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::BigIntNeg &lhs, const BlackBoxOp::BigIntNeg &rhs) { - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } - if (!(lhs.output == rhs.output)) { return false; } + inline bool operator==(const BrilligInputs &lhs, const BrilligInputs &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BlackBoxOp::BigIntNeg::bincodeSerialize() const { + inline std::vector BrilligInputs::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::BigIntNeg BlackBoxOp::BigIntNeg::bincodeDeserialize(std::vector input) { + inline BrilligInputs BrilligInputs::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3563,40 +4041,38 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntNeg &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); - 
serde::Serializable::serialize(obj.output, serializer); +void serde::Serializable::serialize(const Circuit::BrilligInputs &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::BlackBoxOp::BigIntNeg serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::BigIntNeg obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligInputs serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::BrilligInputs obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::BigIntMul &lhs, const BlackBoxOp::BigIntMul &rhs) { - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } - if (!(lhs.output == rhs.output)) { return false; } + inline bool operator==(const BrilligInputs::Single &lhs, const BrilligInputs::Single &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BlackBoxOp::BigIntMul::bincodeSerialize() const { + inline std::vector BrilligInputs::Single::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::BigIntMul BlackBoxOp::BigIntMul::bincodeDeserialize(std::vector input) { + inline BrilligInputs::Single BrilligInputs::Single::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3604,43 +4080,37 @@ namespace Circuit { } } // end of namespace Circuit - -template <> -template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntMul &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); - serde::Serializable::serialize(obj.output, serializer); + +template <> +template +void serde::Serializable::serialize(const Circuit::BrilligInputs::Single &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.value, serializer); } template <> template -Circuit::BlackBoxOp::BigIntMul serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::BigIntMul obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligInputs::Single serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligInputs::Single obj; + obj.value = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::BigIntDiv &lhs, const BlackBoxOp::BigIntDiv &rhs) { - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } - if (!(lhs.output == rhs.output)) { 
return false; } + inline bool operator==(const BrilligInputs::Array &lhs, const BrilligInputs::Array &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BlackBoxOp::BigIntDiv::bincodeSerialize() const { + inline std::vector BrilligInputs::Array::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::BigIntDiv BlackBoxOp::BigIntDiv::bincodeDeserialize(std::vector input) { + inline BrilligInputs::Array BrilligInputs::Array::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3651,40 +4121,34 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntDiv &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); - serde::Serializable::serialize(obj.output, serializer); +void serde::Serializable::serialize(const Circuit::BrilligInputs::Array &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.value, serializer); } template <> template -Circuit::BlackBoxOp::BigIntDiv serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::BigIntDiv obj; - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligInputs::Array serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligInputs::Array obj; + obj.value = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::BigIntFromLeBytes &lhs, const BlackBoxOp::BigIntFromLeBytes &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.modulus == rhs.modulus)) { return false; } - if (!(lhs.output == rhs.output)) { return false; } + inline bool operator==(const BrilligInputs::MemoryArray &lhs, const BrilligInputs::MemoryArray &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BlackBoxOp::BigIntFromLeBytes::bincodeSerialize() const { + inline std::vector BrilligInputs::MemoryArray::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::BigIntFromLeBytes BlackBoxOp::BigIntFromLeBytes::bincodeDeserialize(std::vector input) { + inline BrilligInputs::MemoryArray BrilligInputs::MemoryArray::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3695,39 +4159,34 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntFromLeBytes &obj, 
Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.modulus, serializer); - serde::Serializable::serialize(obj.output, serializer); +void serde::Serializable::serialize(const Circuit::BrilligInputs::MemoryArray &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.value, serializer); } template <> template -Circuit::BlackBoxOp::BigIntFromLeBytes serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::BigIntFromLeBytes obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.modulus = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligInputs::MemoryArray serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligInputs::MemoryArray obj; + obj.value = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::BigIntToLeBytes &lhs, const BlackBoxOp::BigIntToLeBytes &rhs) { - if (!(lhs.input == rhs.input)) { return false; } - if (!(lhs.output == rhs.output)) { return false; } + inline bool operator==(const BrilligOpcode &lhs, const BrilligOpcode &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BlackBoxOp::BigIntToLeBytes::bincodeSerialize() const { + inline std::vector BrilligOpcode::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::BigIntToLeBytes BlackBoxOp::BigIntToLeBytes::bincodeDeserialize(std::vector input) { + inline BrilligOpcode BrilligOpcode::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3738,38 +4197,41 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::BigIntToLeBytes &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.input, serializer); - serde::Serializable::serialize(obj.output, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::BlackBoxOp::BigIntToLeBytes serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::BigIntToLeBytes obj; - obj.input = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::BrilligOpcode obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::Poseidon2Permutation &lhs, const BlackBoxOp::Poseidon2Permutation &rhs) { - if (!(lhs.message == rhs.message)) { return false; } - if (!(lhs.output == rhs.output)) { return false; } - if (!(lhs.len == rhs.len)) { 
return false; } + inline bool operator==(const BrilligOpcode::BinaryFieldOp &lhs, const BrilligOpcode::BinaryFieldOp &rhs) { + if (!(lhs.destination == rhs.destination)) { return false; } + if (!(lhs.op == rhs.op)) { return false; } + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } return true; } - inline std::vector BlackBoxOp::Poseidon2Permutation::bincodeSerialize() const { + inline std::vector BrilligOpcode::BinaryFieldOp::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::Poseidon2Permutation BlackBoxOp::Poseidon2Permutation::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::BinaryFieldOp BrilligOpcode::BinaryFieldOp::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3780,40 +4242,44 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::Poseidon2Permutation &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.message, serializer); - serde::Serializable::serialize(obj.output, serializer); - serde::Serializable::serialize(obj.len, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::BinaryFieldOp &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.destination, serializer); + serde::Serializable::serialize(obj.op, serializer); + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); } template <> template -Circuit::BlackBoxOp::Poseidon2Permutation serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::Poseidon2Permutation obj; - obj.message = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); - obj.len = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode::BinaryFieldOp serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::BinaryFieldOp obj; + obj.destination = serde::Deserializable::deserialize(deserializer); + obj.op = serde::Deserializable::deserialize(deserializer); + obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlackBoxOp::Sha256Compression &lhs, const BlackBoxOp::Sha256Compression &rhs) { - if (!(lhs.input == rhs.input)) { return false; } - if (!(lhs.hash_values == rhs.hash_values)) { return false; } - if (!(lhs.output == rhs.output)) { return false; } + inline bool operator==(const BrilligOpcode::BinaryIntOp &lhs, const BrilligOpcode::BinaryIntOp &rhs) { + if (!(lhs.destination == rhs.destination)) { return false; } + if (!(lhs.op == rhs.op)) { return false; } + if (!(lhs.bit_size == rhs.bit_size)) { return false; } + if (!(lhs.lhs == rhs.lhs)) { return false; } + if (!(lhs.rhs == rhs.rhs)) { return false; } return true; } - inline std::vector BlackBoxOp::Sha256Compression::bincodeSerialize() const { + inline std::vector BrilligOpcode::BinaryIntOp::bincodeSerialize() const { auto 
serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlackBoxOp::Sha256Compression BlackBoxOp::Sha256Compression::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::BinaryIntOp BrilligOpcode::BinaryIntOp::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3824,38 +4290,44 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlackBoxOp::Sha256Compression &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.input, serializer); - serde::Serializable::serialize(obj.hash_values, serializer); - serde::Serializable::serialize(obj.output, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::BinaryIntOp &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.destination, serializer); + serde::Serializable::serialize(obj.op, serializer); + serde::Serializable::serialize(obj.bit_size, serializer); + serde::Serializable::serialize(obj.lhs, serializer); + serde::Serializable::serialize(obj.rhs, serializer); } template <> template -Circuit::BlackBoxOp::Sha256Compression serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BlackBoxOp::Sha256Compression obj; - obj.input = serde::Deserializable::deserialize(deserializer); - obj.hash_values = serde::Deserializable::deserialize(deserializer); - obj.output = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode::BinaryIntOp serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::BinaryIntOp obj; + obj.destination = serde::Deserializable::deserialize(deserializer); + obj.op = serde::Deserializable::deserialize(deserializer); + obj.bit_size = serde::Deserializable::deserialize(deserializer); + obj.lhs = serde::Deserializable::deserialize(deserializer); + obj.rhs = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BlockId &lhs, const BlockId &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const BrilligOpcode::Cast &lhs, const BrilligOpcode::Cast &rhs) { + if (!(lhs.destination == rhs.destination)) { return false; } + if (!(lhs.source == rhs.source)) { return false; } + if (!(lhs.bit_size == rhs.bit_size)) { return false; } return true; } - inline std::vector BlockId::bincodeSerialize() const { + inline std::vector BrilligOpcode::Cast::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BlockId BlockId::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::Cast BrilligOpcode::Cast::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3866,41 +4338,39 @@ 
namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BlockId &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::Cast &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.destination, serializer); + serde::Serializable::serialize(obj.source, serializer); + serde::Serializable::serialize(obj.bit_size, serializer); } template <> template -Circuit::BlockId serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::BlockId obj; - obj.value = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); +Circuit::BrilligOpcode::Cast serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::Cast obj; + obj.destination = serde::Deserializable::deserialize(deserializer); + obj.source = serde::Deserializable::deserialize(deserializer); + obj.bit_size = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const Brillig &lhs, const Brillig &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.outputs == rhs.outputs)) { return false; } - if (!(lhs.bytecode == rhs.bytecode)) { return false; } - if (!(lhs.predicate == rhs.predicate)) { return false; } + inline bool operator==(const BrilligOpcode::JumpIfNot &lhs, const BrilligOpcode::JumpIfNot &rhs) { + if (!(lhs.condition == rhs.condition)) { return false; } + if (!(lhs.location == rhs.location)) { return false; } return true; } - inline std::vector Brillig::bincodeSerialize() const { + inline std::vector BrilligOpcode::JumpIfNot::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline Brillig Brillig::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::JumpIfNot BrilligOpcode::JumpIfNot::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3911,44 +4381,37 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::Brillig &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.outputs, serializer); - serde::Serializable::serialize(obj.bytecode, serializer); - serde::Serializable::serialize(obj.predicate, serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::JumpIfNot &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.condition, serializer); + serde::Serializable::serialize(obj.location, serializer); } template <> template -Circuit::Brillig serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::Brillig obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); - obj.bytecode 
= serde::Deserializable::deserialize(deserializer); - obj.predicate = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); +Circuit::BrilligOpcode::JumpIfNot serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::JumpIfNot obj; + obj.condition = serde::Deserializable::deserialize(deserializer); + obj.location = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligInputs &lhs, const BrilligInputs &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const BrilligOpcode::JumpIf &lhs, const BrilligOpcode::JumpIf &rhs) { + if (!(lhs.condition == rhs.condition)) { return false; } + if (!(lhs.location == rhs.location)) { return false; } return true; } - inline std::vector BrilligInputs::bincodeSerialize() const { + inline std::vector BrilligOpcode::JumpIf::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligInputs BrilligInputs::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::JumpIf BrilligOpcode::JumpIf::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -3959,38 +4422,36 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligInputs &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::JumpIf &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.condition, serializer); + serde::Serializable::serialize(obj.location, serializer); } -template <> -template -Circuit::BrilligInputs serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::BrilligInputs obj; - obj.value = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); +template <> +template +Circuit::BrilligOpcode::JumpIf serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::JumpIf obj; + obj.condition = serde::Deserializable::deserialize(deserializer); + obj.location = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligInputs::Single &lhs, const BrilligInputs::Single &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const BrilligOpcode::Jump &lhs, const BrilligOpcode::Jump &rhs) { + if (!(lhs.location == rhs.location)) { return false; } return true; } - inline std::vector BrilligInputs::Single::bincodeSerialize() const { + inline std::vector BrilligOpcode::Jump::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligInputs::Single BrilligInputs::Single::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::Jump 
BrilligOpcode::Jump::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4001,34 +4462,36 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligInputs::Single &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.value, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::Jump &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.location, serializer); } template <> template -Circuit::BrilligInputs::Single serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligInputs::Single obj; - obj.value = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode::Jump serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::Jump obj; + obj.location = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligInputs::Array &lhs, const BrilligInputs::Array &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const BrilligOpcode::CalldataCopy &lhs, const BrilligOpcode::CalldataCopy &rhs) { + if (!(lhs.destination_address == rhs.destination_address)) { return false; } + if (!(lhs.size == rhs.size)) { return false; } + if (!(lhs.offset == rhs.offset)) { return false; } return true; } - inline std::vector BrilligInputs::Array::bincodeSerialize() const { + inline std::vector BrilligOpcode::CalldataCopy::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligInputs::Array BrilligInputs::Array::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::CalldataCopy BrilligOpcode::CalldataCopy::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4039,34 +4502,38 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligInputs::Array &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.value, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::CalldataCopy &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.destination_address, serializer); + serde::Serializable::serialize(obj.size, serializer); + serde::Serializable::serialize(obj.offset, serializer); } template <> template -Circuit::BrilligInputs::Array serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligInputs::Array obj; - obj.value = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode::CalldataCopy serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::CalldataCopy obj; + obj.destination_address = serde::Deserializable::deserialize(deserializer); + obj.size = serde::Deserializable::deserialize(deserializer); + 
obj.offset = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode &lhs, const BrilligOpcode &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const BrilligOpcode::Call &lhs, const BrilligOpcode::Call &rhs) { + if (!(lhs.location == rhs.location)) { return false; } return true; } - inline std::vector BrilligOpcode::bincodeSerialize() const { + inline std::vector BrilligOpcode::Call::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode BrilligOpcode::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::Call BrilligOpcode::Call::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4077,41 +4544,36 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::Call &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.location, serializer); } template <> template -Circuit::BrilligOpcode serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::BrilligOpcode obj; - obj.value = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); +Circuit::BrilligOpcode::Call serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::Call obj; + obj.location = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::BinaryFieldOp &lhs, const BrilligOpcode::BinaryFieldOp &rhs) { + inline bool operator==(const BrilligOpcode::Const &lhs, const BrilligOpcode::Const &rhs) { if (!(lhs.destination == rhs.destination)) { return false; } - if (!(lhs.op == rhs.op)) { return false; } - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } + if (!(lhs.bit_size == rhs.bit_size)) { return false; } + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BrilligOpcode::BinaryFieldOp::bincodeSerialize() const { + inline std::vector BrilligOpcode::Const::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::BinaryFieldOp BrilligOpcode::BinaryFieldOp::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::Const BrilligOpcode::Const::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not 
read"); } @@ -4122,44 +4584,37 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::BinaryFieldOp &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BrilligOpcode::Const &obj, Serializer &serializer) { serde::Serializable::serialize(obj.destination, serializer); - serde::Serializable::serialize(obj.op, serializer); - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); + serde::Serializable::serialize(obj.bit_size, serializer); + serde::Serializable::serialize(obj.value, serializer); } template <> template -Circuit::BrilligOpcode::BinaryFieldOp serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::BinaryFieldOp obj; +Circuit::BrilligOpcode::Const serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::Const obj; obj.destination = serde::Deserializable::deserialize(deserializer); - obj.op = serde::Deserializable::deserialize(deserializer); - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); + obj.bit_size = serde::Deserializable::deserialize(deserializer); + obj.value = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::BinaryIntOp &lhs, const BrilligOpcode::BinaryIntOp &rhs) { - if (!(lhs.destination == rhs.destination)) { return false; } - if (!(lhs.op == rhs.op)) { return false; } - if (!(lhs.bit_size == rhs.bit_size)) { return false; } - if (!(lhs.lhs == rhs.lhs)) { return false; } - if (!(lhs.rhs == rhs.rhs)) { return false; } + inline bool operator==(const BrilligOpcode::Return &lhs, const BrilligOpcode::Return &rhs) { return true; } - inline std::vector BrilligOpcode::BinaryIntOp::bincodeSerialize() const { + inline std::vector BrilligOpcode::Return::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::BinaryIntOp BrilligOpcode::BinaryIntOp::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::Return BrilligOpcode::Return::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4170,43 +4625,36 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::BinaryIntOp &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.destination, serializer); - serde::Serializable::serialize(obj.op, serializer); - serde::Serializable::serialize(obj.bit_size, serializer); - serde::Serializable::serialize(obj.lhs, serializer); - serde::Serializable::serialize(obj.rhs, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::Return &obj, Serializer &serializer) { } template <> template -Circuit::BrilligOpcode::BinaryIntOp serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::BinaryIntOp obj; - obj.destination = serde::Deserializable::deserialize(deserializer); - obj.op = serde::Deserializable::deserialize(deserializer); - 
obj.bit_size = serde::Deserializable::deserialize(deserializer); - obj.lhs = serde::Deserializable::deserialize(deserializer); - obj.rhs = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode::Return serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::Return obj; return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::JumpIfNot &lhs, const BrilligOpcode::JumpIfNot &rhs) { - if (!(lhs.condition == rhs.condition)) { return false; } - if (!(lhs.location == rhs.location)) { return false; } + inline bool operator==(const BrilligOpcode::ForeignCall &lhs, const BrilligOpcode::ForeignCall &rhs) { + if (!(lhs.function == rhs.function)) { return false; } + if (!(lhs.destinations == rhs.destinations)) { return false; } + if (!(lhs.destination_value_types == rhs.destination_value_types)) { return false; } + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.input_value_types == rhs.input_value_types)) { return false; } return true; } - inline std::vector BrilligOpcode::JumpIfNot::bincodeSerialize() const { + inline std::vector BrilligOpcode::ForeignCall::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::JumpIfNot BrilligOpcode::JumpIfNot::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::ForeignCall BrilligOpcode::ForeignCall::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4217,37 +4665,43 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::JumpIfNot &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.condition, serializer); - serde::Serializable::serialize(obj.location, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::ForeignCall &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.function, serializer); + serde::Serializable::serialize(obj.destinations, serializer); + serde::Serializable::serialize(obj.destination_value_types, serializer); + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.input_value_types, serializer); } template <> template -Circuit::BrilligOpcode::JumpIfNot serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::JumpIfNot obj; - obj.condition = serde::Deserializable::deserialize(deserializer); - obj.location = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode::ForeignCall serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::ForeignCall obj; + obj.function = serde::Deserializable::deserialize(deserializer); + obj.destinations = serde::Deserializable::deserialize(deserializer); + obj.destination_value_types = serde::Deserializable::deserialize(deserializer); + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.input_value_types = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::JumpIf &lhs, const BrilligOpcode::JumpIf &rhs) 
{ - if (!(lhs.condition == rhs.condition)) { return false; } - if (!(lhs.location == rhs.location)) { return false; } + inline bool operator==(const BrilligOpcode::Mov &lhs, const BrilligOpcode::Mov &rhs) { + if (!(lhs.destination == rhs.destination)) { return false; } + if (!(lhs.source == rhs.source)) { return false; } return true; } - inline std::vector BrilligOpcode::JumpIf::bincodeSerialize() const { + inline std::vector BrilligOpcode::Mov::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::JumpIf BrilligOpcode::JumpIf::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::Mov BrilligOpcode::Mov::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4258,36 +4712,37 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::JumpIf &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.condition, serializer); - serde::Serializable::serialize(obj.location, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::Mov &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.destination, serializer); + serde::Serializable::serialize(obj.source, serializer); } template <> template -Circuit::BrilligOpcode::JumpIf serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::JumpIf obj; - obj.condition = serde::Deserializable::deserialize(deserializer); - obj.location = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode::Mov serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::Mov obj; + obj.destination = serde::Deserializable::deserialize(deserializer); + obj.source = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::Jump &lhs, const BrilligOpcode::Jump &rhs) { - if (!(lhs.location == rhs.location)) { return false; } + inline bool operator==(const BrilligOpcode::Load &lhs, const BrilligOpcode::Load &rhs) { + if (!(lhs.destination == rhs.destination)) { return false; } + if (!(lhs.source_pointer == rhs.source_pointer)) { return false; } return true; } - inline std::vector BrilligOpcode::Jump::bincodeSerialize() const { + inline std::vector BrilligOpcode::Load::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::Jump BrilligOpcode::Jump::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::Load BrilligOpcode::Load::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4298,34 +4753,37 @@ namespace Circuit { template <> template -void 
serde::Serializable::serialize(const Circuit::BrilligOpcode::Jump &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.location, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::Load &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.destination, serializer); + serde::Serializable::serialize(obj.source_pointer, serializer); } template <> template -Circuit::BrilligOpcode::Jump serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::Jump obj; - obj.location = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode::Load serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::Load obj; + obj.destination = serde::Deserializable::deserialize(deserializer); + obj.source_pointer = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::Call &lhs, const BrilligOpcode::Call &rhs) { - if (!(lhs.location == rhs.location)) { return false; } + inline bool operator==(const BrilligOpcode::Store &lhs, const BrilligOpcode::Store &rhs) { + if (!(lhs.destination_pointer == rhs.destination_pointer)) { return false; } + if (!(lhs.source == rhs.source)) { return false; } return true; } - inline std::vector BrilligOpcode::Call::bincodeSerialize() const { + inline std::vector BrilligOpcode::Store::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::Call BrilligOpcode::Call::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::Store BrilligOpcode::Store::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4336,35 +4794,36 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::Call &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.location, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::Store &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.destination_pointer, serializer); + serde::Serializable::serialize(obj.source, serializer); } template <> template -Circuit::BrilligOpcode::Call serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::Call obj; - obj.location = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode::Store serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::Store obj; + obj.destination_pointer = serde::Deserializable::deserialize(deserializer); + obj.source = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::Const &lhs, const BrilligOpcode::Const &rhs) { - if (!(lhs.destination == rhs.destination)) { return false; } + inline bool operator==(const BrilligOpcode::BlackBox &lhs, const BrilligOpcode::BlackBox &rhs) { if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BrilligOpcode::Const::bincodeSerialize() const { + inline std::vector 
BrilligOpcode::BlackBox::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::Const BrilligOpcode::Const::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::BlackBox BrilligOpcode::BlackBox::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4375,35 +4834,33 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::Const &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.destination, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::BlackBox &obj, Serializer &serializer) { serde::Serializable::serialize(obj.value, serializer); } template <> template -Circuit::BrilligOpcode::Const serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::Const obj; - obj.destination = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode::BlackBox serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::BlackBox obj; obj.value = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::Return &lhs, const BrilligOpcode::Return &rhs) { + inline bool operator==(const BrilligOpcode::Trap &lhs, const BrilligOpcode::Trap &rhs) { return true; } - inline std::vector BrilligOpcode::Return::bincodeSerialize() const { + inline std::vector BrilligOpcode::Trap::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::Return BrilligOpcode::Return::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::Trap BrilligOpcode::Trap::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4414,34 +4871,33 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::Return &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::BrilligOpcode::Trap &obj, Serializer &serializer) { } template <> template -Circuit::BrilligOpcode::Return serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::Return obj; +Circuit::BrilligOpcode::Trap serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::Trap obj; return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::ForeignCall &lhs, const BrilligOpcode::ForeignCall &rhs) { - if (!(lhs.function == rhs.function)) { return false; } - if (!(lhs.destinations == rhs.destinations)) { return false; } - if (!(lhs.inputs == rhs.inputs)) { return false; } + inline bool operator==(const 
BrilligOpcode::Stop &lhs, const BrilligOpcode::Stop &rhs) { + if (!(lhs.return_data_offset == rhs.return_data_offset)) { return false; } + if (!(lhs.return_data_size == rhs.return_data_size)) { return false; } return true; } - inline std::vector BrilligOpcode::ForeignCall::bincodeSerialize() const { + inline std::vector BrilligOpcode::Stop::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::ForeignCall BrilligOpcode::ForeignCall::bincodeDeserialize(std::vector input) { + inline BrilligOpcode::Stop BrilligOpcode::Stop::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4452,39 +4908,36 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::ForeignCall &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.function, serializer); - serde::Serializable::serialize(obj.destinations, serializer); - serde::Serializable::serialize(obj.inputs, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOpcode::Stop &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.return_data_offset, serializer); + serde::Serializable::serialize(obj.return_data_size, serializer); } template <> template -Circuit::BrilligOpcode::ForeignCall serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::ForeignCall obj; - obj.function = serde::Deserializable::deserialize(deserializer); - obj.destinations = serde::Deserializable::deserialize(deserializer); - obj.inputs = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOpcode::Stop serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOpcode::Stop obj; + obj.return_data_offset = serde::Deserializable::deserialize(deserializer); + obj.return_data_size = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::Mov &lhs, const BrilligOpcode::Mov &rhs) { - if (!(lhs.destination == rhs.destination)) { return false; } - if (!(lhs.source == rhs.source)) { return false; } + inline bool operator==(const BrilligOutputs &lhs, const BrilligOutputs &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BrilligOpcode::Mov::bincodeSerialize() const { + inline std::vector BrilligOutputs::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::Mov BrilligOpcode::Mov::bincodeDeserialize(std::vector input) { + inline BrilligOutputs BrilligOutputs::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4495,37 +4948,38 @@ namespace Circuit 
{ template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::Mov &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.destination, serializer); - serde::Serializable::serialize(obj.source, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOutputs &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::BrilligOpcode::Mov serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::Mov obj; - obj.destination = serde::Deserializable::deserialize(deserializer); - obj.source = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOutputs serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::BrilligOutputs obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::Load &lhs, const BrilligOpcode::Load &rhs) { - if (!(lhs.destination == rhs.destination)) { return false; } - if (!(lhs.source_pointer == rhs.source_pointer)) { return false; } + inline bool operator==(const BrilligOutputs::Simple &lhs, const BrilligOutputs::Simple &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BrilligOpcode::Load::bincodeSerialize() const { + inline std::vector BrilligOutputs::Simple::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::Load BrilligOpcode::Load::bincodeDeserialize(std::vector input) { + inline BrilligOutputs::Simple BrilligOutputs::Simple::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4536,37 +4990,34 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::Load &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.destination, serializer); - serde::Serializable::serialize(obj.source_pointer, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOutputs::Simple &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.value, serializer); } template <> template -Circuit::BrilligOpcode::Load serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::Load obj; - obj.destination = serde::Deserializable::deserialize(deserializer); - obj.source_pointer = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOutputs::Simple serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOutputs::Simple obj; + obj.value = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::Store &lhs, const BrilligOpcode::Store &rhs) { - if (!(lhs.destination_pointer == rhs.destination_pointer)) { return false; } - if (!(lhs.source == rhs.source)) { return false; } + inline bool operator==(const 
BrilligOutputs::Array &lhs, const BrilligOutputs::Array &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BrilligOpcode::Store::bincodeSerialize() const { + inline std::vector BrilligOutputs::Array::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::Store BrilligOpcode::Store::bincodeDeserialize(std::vector input) { + inline BrilligOutputs::Array BrilligOutputs::Array::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4577,36 +5028,41 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::Store &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.destination_pointer, serializer); - serde::Serializable::serialize(obj.source, serializer); +void serde::Serializable::serialize(const Circuit::BrilligOutputs::Array &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.value, serializer); } template <> template -Circuit::BrilligOpcode::Store serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::Store obj; - obj.destination_pointer = serde::Deserializable::deserialize(deserializer); - obj.source = serde::Deserializable::deserialize(deserializer); +Circuit::BrilligOutputs::Array serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::BrilligOutputs::Array obj; + obj.value = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::BlackBox &lhs, const BrilligOpcode::BlackBox &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const Circuit &lhs, const Circuit &rhs) { + if (!(lhs.current_witness_index == rhs.current_witness_index)) { return false; } + if (!(lhs.opcodes == rhs.opcodes)) { return false; } + if (!(lhs.expression_width == rhs.expression_width)) { return false; } + if (!(lhs.private_parameters == rhs.private_parameters)) { return false; } + if (!(lhs.public_parameters == rhs.public_parameters)) { return false; } + if (!(lhs.return_values == rhs.return_values)) { return false; } + if (!(lhs.assert_messages == rhs.assert_messages)) { return false; } + if (!(lhs.recursive == rhs.recursive)) { return false; } return true; } - inline std::vector BrilligOpcode::BlackBox::bincodeSerialize() const { + inline std::vector Circuit::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::BlackBox BrilligOpcode::BlackBox::bincodeDeserialize(std::vector input) { + inline Circuit Circuit::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ 
-4617,33 +5073,52 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::BlackBox &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.value, serializer); +void serde::Serializable::serialize(const Circuit::Circuit &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.current_witness_index, serializer); + serde::Serializable::serialize(obj.opcodes, serializer); + serde::Serializable::serialize(obj.expression_width, serializer); + serde::Serializable::serialize(obj.private_parameters, serializer); + serde::Serializable::serialize(obj.public_parameters, serializer); + serde::Serializable::serialize(obj.return_values, serializer); + serde::Serializable::serialize(obj.assert_messages, serializer); + serde::Serializable::serialize(obj.recursive, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::BrilligOpcode::BlackBox serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::BlackBox obj; - obj.value = serde::Deserializable::deserialize(deserializer); +Circuit::Circuit serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::Circuit obj; + obj.current_witness_index = serde::Deserializable::deserialize(deserializer); + obj.opcodes = serde::Deserializable::deserialize(deserializer); + obj.expression_width = serde::Deserializable::deserialize(deserializer); + obj.private_parameters = serde::Deserializable::deserialize(deserializer); + obj.public_parameters = serde::Deserializable::deserialize(deserializer); + obj.return_values = serde::Deserializable::deserialize(deserializer); + obj.assert_messages = serde::Deserializable::deserialize(deserializer); + obj.recursive = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::Trap &lhs, const BrilligOpcode::Trap &rhs) { + inline bool operator==(const Directive &lhs, const Directive &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BrilligOpcode::Trap::bincodeSerialize() const { + inline std::vector Directive::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::Trap BrilligOpcode::Trap::bincodeDeserialize(std::vector input) { + inline Directive Directive::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4654,31 +5129,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::Trap &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::Directive &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::BrilligOpcode::Trap serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::Trap obj; 
+Circuit::Directive serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::Directive obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const BrilligOpcode::Stop &lhs, const BrilligOpcode::Stop &rhs) { + inline bool operator==(const Directive::ToLeRadix &lhs, const Directive::ToLeRadix &rhs) { + if (!(lhs.a == rhs.a)) { return false; } + if (!(lhs.b == rhs.b)) { return false; } + if (!(lhs.radix == rhs.radix)) { return false; } return true; } - inline std::vector BrilligOpcode::Stop::bincodeSerialize() const { + inline std::vector Directive::ToLeRadix::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOpcode::Stop BrilligOpcode::Stop::bincodeDeserialize(std::vector input) { + inline Directive::ToLeRadix Directive::ToLeRadix::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4689,32 +5173,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOpcode::Stop &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::Directive::ToLeRadix &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.a, serializer); + serde::Serializable::serialize(obj.b, serializer); + serde::Serializable::serialize(obj.radix, serializer); } template <> template -Circuit::BrilligOpcode::Stop serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOpcode::Stop obj; +Circuit::Directive::ToLeRadix serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::Directive::ToLeRadix obj; + obj.a = serde::Deserializable::deserialize(deserializer); + obj.b = serde::Deserializable::deserialize(deserializer); + obj.radix = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const BrilligOutputs &lhs, const BrilligOutputs &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const Expression &lhs, const Expression &rhs) { + if (!(lhs.mul_terms == rhs.mul_terms)) { return false; } + if (!(lhs.linear_combinations == rhs.linear_combinations)) { return false; } + if (!(lhs.q_c == rhs.q_c)) { return false; } return true; } - inline std::vector BrilligOutputs::bincodeSerialize() const { + inline std::vector Expression::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOutputs BrilligOutputs::bincodeDeserialize(std::vector input) { + inline Expression Expression::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some 
input bytes were not read"); } @@ -4725,38 +5217,42 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOutputs &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::Expression &obj, Serializer &serializer) { serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); + serde::Serializable::serialize(obj.mul_terms, serializer); + serde::Serializable::serialize(obj.linear_combinations, serializer); + serde::Serializable::serialize(obj.q_c, serializer); serializer.decrease_container_depth(); } template <> template -Circuit::BrilligOutputs serde::Deserializable::deserialize(Deserializer &deserializer) { +Circuit::Expression serde::Deserializable::deserialize(Deserializer &deserializer) { deserializer.increase_container_depth(); - Circuit::BrilligOutputs obj; - obj.value = serde::Deserializable::deserialize(deserializer); + Circuit::Expression obj; + obj.mul_terms = serde::Deserializable::deserialize(deserializer); + obj.linear_combinations = serde::Deserializable::deserialize(deserializer); + obj.q_c = serde::Deserializable::deserialize(deserializer); deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const BrilligOutputs::Simple &lhs, const BrilligOutputs::Simple &rhs) { + inline bool operator==(const ExpressionWidth &lhs, const ExpressionWidth &rhs) { if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector BrilligOutputs::Simple::bincodeSerialize() const { + inline std::vector ExpressionWidth::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOutputs::Simple BrilligOutputs::Simple::bincodeDeserialize(std::vector input) { + inline ExpressionWidth ExpressionWidth::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4767,34 +5263,37 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOutputs::Simple &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::ExpressionWidth &obj, Serializer &serializer) { + serializer.increase_container_depth(); serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::BrilligOutputs::Simple serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOutputs::Simple obj; +Circuit::ExpressionWidth serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::ExpressionWidth obj; obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const BrilligOutputs::Array &lhs, const BrilligOutputs::Array &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const ExpressionWidth::Unbounded &lhs, const ExpressionWidth::Unbounded &rhs) { return true; } - inline std::vector BrilligOutputs::Array::bincodeSerialize() const { + inline std::vector 
ExpressionWidth::Unbounded::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline BrilligOutputs::Array BrilligOutputs::Array::bincodeDeserialize(std::vector input) { + inline ExpressionWidth::Unbounded ExpressionWidth::Unbounded::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4805,39 +5304,32 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::BrilligOutputs::Array &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.value, serializer); +void serde::Serializable::serialize(const Circuit::ExpressionWidth::Unbounded &obj, Serializer &serializer) { } template <> template -Circuit::BrilligOutputs::Array serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::BrilligOutputs::Array obj; - obj.value = serde::Deserializable::deserialize(deserializer); +Circuit::ExpressionWidth::Unbounded serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::ExpressionWidth::Unbounded obj; return obj; } namespace Circuit { - inline bool operator==(const Circuit &lhs, const Circuit &rhs) { - if (!(lhs.current_witness_index == rhs.current_witness_index)) { return false; } - if (!(lhs.opcodes == rhs.opcodes)) { return false; } - if (!(lhs.private_parameters == rhs.private_parameters)) { return false; } - if (!(lhs.public_parameters == rhs.public_parameters)) { return false; } - if (!(lhs.return_values == rhs.return_values)) { return false; } - if (!(lhs.assert_messages == rhs.assert_messages)) { return false; } + inline bool operator==(const ExpressionWidth::Bounded &lhs, const ExpressionWidth::Bounded &rhs) { + if (!(lhs.width == rhs.width)) { return false; } return true; } - inline std::vector Circuit::bincodeSerialize() const { + inline std::vector ExpressionWidth::Bounded::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline Circuit Circuit::bincodeDeserialize(std::vector input) { + inline ExpressionWidth::Bounded ExpressionWidth::Bounded::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4848,48 +5340,35 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::Circuit &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.current_witness_index, serializer); - serde::Serializable::serialize(obj.opcodes, serializer); - serde::Serializable::serialize(obj.private_parameters, serializer); - serde::Serializable::serialize(obj.public_parameters, serializer); - serde::Serializable::serialize(obj.return_values, serializer); - serde::Serializable::serialize(obj.assert_messages, 
serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::ExpressionWidth::Bounded &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.width, serializer); } template <> template -Circuit::Circuit serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::Circuit obj; - obj.current_witness_index = serde::Deserializable::deserialize(deserializer); - obj.opcodes = serde::Deserializable::deserialize(deserializer); - obj.private_parameters = serde::Deserializable::deserialize(deserializer); - obj.public_parameters = serde::Deserializable::deserialize(deserializer); - obj.return_values = serde::Deserializable::deserialize(deserializer); - obj.assert_messages = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); +Circuit::ExpressionWidth::Bounded serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::ExpressionWidth::Bounded obj; + obj.width = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const Directive &lhs, const Directive &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const FunctionInput &lhs, const FunctionInput &rhs) { + if (!(lhs.witness == rhs.witness)) { return false; } + if (!(lhs.num_bits == rhs.num_bits)) { return false; } return true; } - inline std::vector Directive::bincodeSerialize() const { + inline std::vector FunctionInput::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline Directive Directive::bincodeDeserialize(std::vector input) { + inline FunctionInput FunctionInput::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4900,40 +5379,41 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::Directive &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::FunctionInput &obj, Serializer &serializer) { serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); + serde::Serializable::serialize(obj.witness, serializer); + serde::Serializable::serialize(obj.num_bits, serializer); serializer.decrease_container_depth(); } template <> template -Circuit::Directive serde::Deserializable::deserialize(Deserializer &deserializer) { +Circuit::FunctionInput serde::Deserializable::deserialize(Deserializer &deserializer) { deserializer.increase_container_depth(); - Circuit::Directive obj; - obj.value = serde::Deserializable::deserialize(deserializer); + Circuit::FunctionInput obj; + obj.witness = serde::Deserializable::deserialize(deserializer); + obj.num_bits = serde::Deserializable::deserialize(deserializer); deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const Directive::ToLeRadix &lhs, const Directive::ToLeRadix &rhs) { - if (!(lhs.a == rhs.a)) { return false; } - if (!(lhs.b == rhs.b)) { return false; } - if (!(lhs.radix == rhs.radix)) { return false; } + inline bool 
operator==(const HeapArray &lhs, const HeapArray &rhs) { + if (!(lhs.pointer == rhs.pointer)) { return false; } + if (!(lhs.size == rhs.size)) { return false; } return true; } - inline std::vector Directive::ToLeRadix::bincodeSerialize() const { + inline std::vector HeapArray::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline Directive::ToLeRadix Directive::ToLeRadix::bincodeDeserialize(std::vector input) { + inline HeapArray HeapArray::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -4944,41 +5424,40 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::Directive::ToLeRadix &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.a, serializer); - serde::Serializable::serialize(obj.b, serializer); - serde::Serializable::serialize(obj.radix, serializer); +void serde::Serializable::serialize(const Circuit::HeapArray &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.pointer, serializer); + serde::Serializable::serialize(obj.size, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::Directive::ToLeRadix serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::Directive::ToLeRadix obj; - obj.a = serde::Deserializable::deserialize(deserializer); - obj.b = serde::Deserializable::deserialize(deserializer); - obj.radix = serde::Deserializable::deserialize(deserializer); +Circuit::HeapArray serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::HeapArray obj; + obj.pointer = serde::Deserializable::deserialize(deserializer); + obj.size = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const Directive::PermutationSort &lhs, const Directive::PermutationSort &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.tuple == rhs.tuple)) { return false; } - if (!(lhs.bits == rhs.bits)) { return false; } - if (!(lhs.sort_by == rhs.sort_by)) { return false; } + inline bool operator==(const HeapValueType &lhs, const HeapValueType &rhs) { + if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector Directive::PermutationSort::bincodeSerialize() const { + inline std::vector HeapValueType::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline Directive::PermutationSort Directive::PermutationSort::bincodeDeserialize(std::vector input) { + inline HeapValueType HeapValueType::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input 
bytes were not read"); } @@ -4989,42 +5468,37 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::Directive::PermutationSort &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.tuple, serializer); - serde::Serializable::serialize(obj.bits, serializer); - serde::Serializable::serialize(obj.sort_by, serializer); +void serde::Serializable::serialize(const Circuit::HeapValueType &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); } template <> template -Circuit::Directive::PermutationSort serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::Directive::PermutationSort obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.tuple = serde::Deserializable::deserialize(deserializer); - obj.bits = serde::Deserializable::deserialize(deserializer); - obj.sort_by = serde::Deserializable::deserialize(deserializer); +Circuit::HeapValueType serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::HeapValueType obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const Expression &lhs, const Expression &rhs) { - if (!(lhs.mul_terms == rhs.mul_terms)) { return false; } - if (!(lhs.linear_combinations == rhs.linear_combinations)) { return false; } - if (!(lhs.q_c == rhs.q_c)) { return false; } + inline bool operator==(const HeapValueType::Simple &lhs, const HeapValueType::Simple &rhs) { return true; } - inline std::vector Expression::bincodeSerialize() const { + inline std::vector HeapValueType::Simple::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline Expression Expression::bincodeDeserialize(std::vector input) { + inline HeapValueType::Simple HeapValueType::Simple::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -5035,43 +5509,33 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::Expression &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.mul_terms, serializer); - serde::Serializable::serialize(obj.linear_combinations, serializer); - serde::Serializable::serialize(obj.q_c, serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::HeapValueType::Simple &obj, Serializer &serializer) { } template <> template -Circuit::Expression serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::Expression obj; - obj.mul_terms = serde::Deserializable::deserialize(deserializer); - obj.linear_combinations = serde::Deserializable::deserialize(deserializer); - obj.q_c = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); 
+Circuit::HeapValueType::Simple serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::HeapValueType::Simple obj; return obj; } namespace Circuit { - inline bool operator==(const FunctionInput &lhs, const FunctionInput &rhs) { - if (!(lhs.witness == rhs.witness)) { return false; } - if (!(lhs.num_bits == rhs.num_bits)) { return false; } + inline bool operator==(const HeapValueType::Array &lhs, const HeapValueType::Array &rhs) { + if (!(lhs.value_types == rhs.value_types)) { return false; } + if (!(lhs.size == rhs.size)) { return false; } return true; } - inline std::vector FunctionInput::bincodeSerialize() const { + inline std::vector HeapValueType::Array::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline FunctionInput FunctionInput::bincodeDeserialize(std::vector input) { + inline HeapValueType::Array HeapValueType::Array::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -5082,41 +5546,36 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::FunctionInput &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.witness, serializer); - serde::Serializable::serialize(obj.num_bits, serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::HeapValueType::Array &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.value_types, serializer); + serde::Serializable::serialize(obj.size, serializer); } template <> template -Circuit::FunctionInput serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::FunctionInput obj; - obj.witness = serde::Deserializable::deserialize(deserializer); - obj.num_bits = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); +Circuit::HeapValueType::Array serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::HeapValueType::Array obj; + obj.value_types = serde::Deserializable::deserialize(deserializer); + obj.size = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const HeapArray &lhs, const HeapArray &rhs) { - if (!(lhs.pointer == rhs.pointer)) { return false; } - if (!(lhs.size == rhs.size)) { return false; } + inline bool operator==(const HeapValueType::Vector &lhs, const HeapValueType::Vector &rhs) { + if (!(lhs.value_types == rhs.value_types)) { return false; } return true; } - inline std::vector HeapArray::bincodeSerialize() const { + inline std::vector HeapValueType::Vector::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline HeapArray HeapArray::bincodeDeserialize(std::vector input) { + inline HeapValueType::Vector HeapValueType::Vector::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = 
serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -5127,21 +5586,15 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::HeapArray &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.pointer, serializer); - serde::Serializable::serialize(obj.size, serializer); - serializer.decrease_container_depth(); +void serde::Serializable::serialize(const Circuit::HeapValueType::Vector &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.value_types, serializer); } template <> template -Circuit::HeapArray serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::HeapArray obj; - obj.pointer = serde::Deserializable::deserialize(deserializer); - obj.size = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); +Circuit::HeapValueType::Vector serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::HeapValueType::Vector obj; + obj.value_types = serde::Deserializable::deserialize(deserializer); return obj; } @@ -5238,6 +5691,48 @@ Circuit::MemOp serde::Deserializable::deserialize(Deserializer & return obj; } +namespace Circuit { + + inline bool operator==(const MemoryAddress &lhs, const MemoryAddress &rhs) { + if (!(lhs.value == rhs.value)) { return false; } + return true; + } + + inline std::vector MemoryAddress::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } + + inline MemoryAddress MemoryAddress::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw serde::deserialization_error("Some input bytes were not read"); + } + return value; + } + +} // end of namespace Circuit + +template <> +template +void serde::Serializable::serialize(const Circuit::MemoryAddress &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.value, serializer); + serializer.decrease_container_depth(); +} + +template <> +template +Circuit::MemoryAddress serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Circuit::MemoryAddress obj; + obj.value = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); + return obj; +} + namespace Circuit { inline bool operator==(const Opcode &lhs, const Opcode &rhs) { @@ -5682,20 +6177,20 @@ Circuit::PublicInputs serde::Deserializable::deserialize( namespace Circuit { - inline bool operator==(const RegisterIndex &lhs, const RegisterIndex &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + inline bool operator==(const Value &lhs, const Value &rhs) { + if (!(lhs.inner == rhs.inner)) { return false; } return true; } - inline std::vector RegisterIndex::bincodeSerialize() const { + inline std::vector Value::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline 
RegisterIndex RegisterIndex::bincodeDeserialize(std::vector input) { + inline Value Value::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -5706,38 +6201,38 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::RegisterIndex &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::Value &obj, Serializer &serializer) { serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); + serde::Serializable::serialize(obj.inner, serializer); serializer.decrease_container_depth(); } template <> template -Circuit::RegisterIndex serde::Deserializable::deserialize(Deserializer &deserializer) { +Circuit::Value serde::Deserializable::deserialize(Deserializer &deserializer) { deserializer.increase_container_depth(); - Circuit::RegisterIndex obj; - obj.value = serde::Deserializable::deserialize(deserializer); + Circuit::Value obj; + obj.inner = serde::Deserializable::deserialize(deserializer); deserializer.decrease_container_depth(); return obj; } namespace Circuit { - inline bool operator==(const RegisterOrMemory &lhs, const RegisterOrMemory &rhs) { + inline bool operator==(const ValueOrArray &lhs, const ValueOrArray &rhs) { if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector RegisterOrMemory::bincodeSerialize() const { + inline std::vector ValueOrArray::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline RegisterOrMemory RegisterOrMemory::bincodeDeserialize(std::vector input) { + inline ValueOrArray ValueOrArray::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -5748,7 +6243,7 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::RegisterOrMemory &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::ValueOrArray &obj, Serializer &serializer) { serializer.increase_container_depth(); serde::Serializable::serialize(obj.value, serializer); serializer.decrease_container_depth(); @@ -5756,9 +6251,9 @@ void serde::Serializable::serialize(const Circuit::Re template <> template -Circuit::RegisterOrMemory serde::Deserializable::deserialize(Deserializer &deserializer) { +Circuit::ValueOrArray serde::Deserializable::deserialize(Deserializer &deserializer) { deserializer.increase_container_depth(); - Circuit::RegisterOrMemory obj; + Circuit::ValueOrArray obj; obj.value = serde::Deserializable::deserialize(deserializer); deserializer.decrease_container_depth(); return obj; @@ -5766,20 +6261,20 @@ Circuit::RegisterOrMemory serde::Deserializable::dese namespace Circuit { - inline bool operator==(const RegisterOrMemory::RegisterIndex &lhs, const RegisterOrMemory::RegisterIndex &rhs) { + inline bool operator==(const 
ValueOrArray::MemoryAddress &lhs, const ValueOrArray::MemoryAddress &rhs) { if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector RegisterOrMemory::RegisterIndex::bincodeSerialize() const { + inline std::vector ValueOrArray::MemoryAddress::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline RegisterOrMemory::RegisterIndex RegisterOrMemory::RegisterIndex::bincodeDeserialize(std::vector input) { + inline ValueOrArray::MemoryAddress ValueOrArray::MemoryAddress::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -5790,34 +6285,34 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::RegisterOrMemory::RegisterIndex &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::ValueOrArray::MemoryAddress &obj, Serializer &serializer) { serde::Serializable::serialize(obj.value, serializer); } template <> template -Circuit::RegisterOrMemory::RegisterIndex serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::RegisterOrMemory::RegisterIndex obj; +Circuit::ValueOrArray::MemoryAddress serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::ValueOrArray::MemoryAddress obj; obj.value = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const RegisterOrMemory::HeapArray &lhs, const RegisterOrMemory::HeapArray &rhs) { + inline bool operator==(const ValueOrArray::HeapArray &lhs, const ValueOrArray::HeapArray &rhs) { if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector RegisterOrMemory::HeapArray::bincodeSerialize() const { + inline std::vector ValueOrArray::HeapArray::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline RegisterOrMemory::HeapArray RegisterOrMemory::HeapArray::bincodeDeserialize(std::vector input) { + inline ValueOrArray::HeapArray ValueOrArray::HeapArray::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -5828,34 +6323,34 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::RegisterOrMemory::HeapArray &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::ValueOrArray::HeapArray &obj, Serializer &serializer) { serde::Serializable::serialize(obj.value, serializer); } template <> template -Circuit::RegisterOrMemory::HeapArray serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::RegisterOrMemory::HeapArray obj; +Circuit::ValueOrArray::HeapArray serde::Deserializable::deserialize(Deserializer &deserializer) { + 
Circuit::ValueOrArray::HeapArray obj; obj.value = serde::Deserializable::deserialize(deserializer); return obj; } namespace Circuit { - inline bool operator==(const RegisterOrMemory::HeapVector &lhs, const RegisterOrMemory::HeapVector &rhs) { + inline bool operator==(const ValueOrArray::HeapVector &lhs, const ValueOrArray::HeapVector &rhs) { if (!(lhs.value == rhs.value)) { return false; } return true; } - inline std::vector RegisterOrMemory::HeapVector::bincodeSerialize() const { + inline std::vector ValueOrArray::HeapVector::bincodeSerialize() const { auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); + serde::Serializable::serialize(*this, serializer); return std::move(serializer).bytes(); } - inline RegisterOrMemory::HeapVector RegisterOrMemory::HeapVector::bincodeDeserialize(std::vector input) { + inline ValueOrArray::HeapVector ValueOrArray::HeapVector::bincodeDeserialize(std::vector input) { auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); + auto value = serde::Deserializable::deserialize(deserializer); if (deserializer.get_buffer_offset() < input.size()) { throw serde::deserialization_error("Some input bytes were not read"); } @@ -5866,60 +6361,18 @@ namespace Circuit { template <> template -void serde::Serializable::serialize(const Circuit::RegisterOrMemory::HeapVector &obj, Serializer &serializer) { +void serde::Serializable::serialize(const Circuit::ValueOrArray::HeapVector &obj, Serializer &serializer) { serde::Serializable::serialize(obj.value, serializer); } template <> template -Circuit::RegisterOrMemory::HeapVector serde::Deserializable::deserialize(Deserializer &deserializer) { - Circuit::RegisterOrMemory::HeapVector obj; +Circuit::ValueOrArray::HeapVector serde::Deserializable::deserialize(Deserializer &deserializer) { + Circuit::ValueOrArray::HeapVector obj; obj.value = serde::Deserializable::deserialize(deserializer); return obj; } -namespace Circuit { - - inline bool operator==(const Value &lhs, const Value &rhs) { - if (!(lhs.inner == rhs.inner)) { return false; } - return true; - } - - inline std::vector Value::bincodeSerialize() const { - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); - } - - inline Value Value::bincodeDeserialize(std::vector input) { - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw serde::deserialization_error("Some input bytes were not read"); - } - return value; - } - -} // end of namespace Circuit - -template <> -template -void serde::Serializable::serialize(const Circuit::Value &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.inner, serializer); - serializer.decrease_container_depth(); -} - -template <> -template -Circuit::Value serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Circuit::Value obj; - obj.inner = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); - return obj; -} - namespace Circuit { inline bool operator==(const Witness &lhs, const Witness &rhs) { diff --git a/acvm-repo/acir/src/circuit/black_box_functions.rs b/acvm-repo/acir/src/circuit/black_box_functions.rs index 97b4759d350..0a7ee244a5e 100644 --- 
a/acvm-repo/acir/src/circuit/black_box_functions.rs +++ b/acvm-repo/acir/src/circuit/black_box_functions.rs @@ -50,7 +50,7 @@ pub enum BlackBoxFunc { /// BigInt addition BigIntAdd, /// BigInt subtraction - BigIntNeg, + BigIntSub, /// BigInt multiplication BigIntMul, /// BigInt division @@ -91,7 +91,7 @@ impl BlackBoxFunc { BlackBoxFunc::RecursiveAggregation => "recursive_aggregation", BlackBoxFunc::EcdsaSecp256r1 => "ecdsa_secp256r1", BlackBoxFunc::BigIntAdd => "bigint_add", - BlackBoxFunc::BigIntNeg => "bigint_neg", + BlackBoxFunc::BigIntSub => "bigint_sub", BlackBoxFunc::BigIntMul => "bigint_mul", BlackBoxFunc::BigIntDiv => "bigint_div", BlackBoxFunc::BigIntFromLeBytes => "bigint_from_le_bytes", @@ -120,7 +120,7 @@ impl BlackBoxFunc { "keccakf1600" => Some(BlackBoxFunc::Keccakf1600), "recursive_aggregation" => Some(BlackBoxFunc::RecursiveAggregation), "bigint_add" => Some(BlackBoxFunc::BigIntAdd), - "bigint_neg" => Some(BlackBoxFunc::BigIntNeg), + "bigint_sub" => Some(BlackBoxFunc::BigIntSub), "bigint_mul" => Some(BlackBoxFunc::BigIntMul), "bigint_div" => Some(BlackBoxFunc::BigIntDiv), "bigint_from_le_bytes" => Some(BlackBoxFunc::BigIntFromLeBytes), diff --git a/acvm-repo/acir/src/circuit/brillig.rs b/acvm-repo/acir/src/circuit/brillig.rs index 63c6ad2a3d4..f394a46ff82 100644 --- a/acvm-repo/acir/src/circuit/brillig.rs +++ b/acvm-repo/acir/src/circuit/brillig.rs @@ -1,3 +1,4 @@ +use super::opcodes::BlockId; use crate::native_types::{Expression, Witness}; use brillig::Opcode as BrilligOpcode; use serde::{Deserialize, Serialize}; @@ -8,6 +9,7 @@ use serde::{Deserialize, Serialize}; pub enum BrilligInputs { Single(Expression), Array(Vec), + MemoryArray(BlockId), } /// Outputs for the Brillig VM. Once the VM has completed diff --git a/acvm-repo/acir/src/circuit/directives.rs b/acvm-repo/acir/src/circuit/directives.rs index 2486f4cfb83..099d0634399 100644 --- a/acvm-repo/acir/src/circuit/directives.rs +++ b/acvm-repo/acir/src/circuit/directives.rs @@ -7,18 +7,5 @@ use serde::{Deserialize, Serialize}; /// In the future, this can be replaced with asm non-determinism blocks pub enum Directive { //decomposition of a: a=\sum b[i]*radix^i where b is an array of witnesses < radix in little endian form - ToLeRadix { - a: Expression, - b: Vec, - radix: u32, - }, - - // Sort directive, using a sorting network - // This directive is used to generate the values of the control bits for the sorting network such that its outputs are properly sorted according to sort_by - PermutationSort { - inputs: Vec>, // Array of tuples to sort - tuple: u32, // tuple size; if 1 then inputs is a single array [a0,a1,..], if 2 then inputs=[(a0,b0),..] is [a0,b0,a1,b1,..], etc.. - bits: Vec, // control bits of the network which permutes the inputs into its sorted version - sort_by: Vec, // specify primary index to sort by, then the secondary,... For instance, if tuple is 2 and sort_by is [1,0], then a=[(a0,b0),..] is sorted by bi and then ai. - }, + ToLeRadix { a: Expression, b: Vec, radix: u32 }, } diff --git a/acvm-repo/acir/src/circuit/mod.rs b/acvm-repo/acir/src/circuit/mod.rs index b248b30b1d9..7e6cbf23803 100644 --- a/acvm-repo/acir/src/circuit/mod.rs +++ b/acvm-repo/acir/src/circuit/mod.rs @@ -15,12 +15,30 @@ use serde::{de::Error as DeserializationError, Deserialize, Deserializer, Serial use std::collections::BTreeSet; +/// Specifies the maximum width of the expressions which will be constrained. 
+/// +/// Unbounded Expressions are useful if you are eventually going to pass the ACIR +/// into a proving system which supports R1CS. +/// +/// Bounded Expressions are useful if you are eventually going to pass the ACIR +/// into a proving system which supports PLONK, where arithmetic expressions have a +/// finite fan-in. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)] +pub enum ExpressionWidth { + #[default] + Unbounded, + Bounded { + width: usize, + }, +} + #[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Default)] pub struct Circuit { // current_witness_index is the highest witness index in the circuit. The next witness to be added to this circuit // will take on this value. (The value is cached here as an optimization.) pub current_witness_index: u32, pub opcodes: Vec, + pub expression_width: ExpressionWidth, /// The set of private inputs to the circuit. pub private_parameters: BTreeSet, @@ -38,7 +56,16 @@ pub struct Circuit { // Note: This should be a BTreeMap, but serde-reflect is creating invalid // c++ code at the moment when it is, due to OpcodeLocation needing a comparison // implementation which is never generated. + // + // TODO: These are only used for constraints that are explicitly created during code generation (such as index out of bounds on slices) + // TODO: We should move towards having all the checks being evaluated in the same manner + // TODO: as runtime assert messages specified by the user. This will also be a breaking change as the `Circuit` structure will change. pub assert_messages: Vec<(OpcodeLocation, String)>, + + /// States whether the backend should use a SNARK recursion friendly prover. + /// If implemented by a backend, this means that proofs generated with this circuit + /// will be friendly for recursively verifying inside of another SNARK. 
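// [Editorial aside, not part of this patch] A minimal usage sketch of the new
// `ExpressionWidth` type introduced above, showing how a caller might pick a width
// for its target proving system. The helper name and the width of 4 are illustrative
// assumptions; only the enum variants and the `acir::circuit` path come from this diff.
use acir::circuit::ExpressionWidth;

fn width_for_backend(plonk_like: bool) -> ExpressionWidth {
    if plonk_like {
        // PLONK-style backends constrain each arithmetic expression to a finite fan-in.
        ExpressionWidth::Bounded { width: 4 }
    } else {
        // R1CS-style backends can absorb arbitrarily wide expressions.
        ExpressionWidth::Unbounded
    }
}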
+ pub recursive: bool, } impl Circuit { @@ -235,7 +262,7 @@ mod tests { opcodes::{BlackBoxFuncCall, FunctionInput}, Circuit, Compression, Opcode, PublicInputs, }; - use crate::native_types::Witness; + use crate::{circuit::ExpressionWidth, native_types::Witness}; use acir_field::FieldElement; fn and_opcode() -> Opcode { @@ -313,11 +340,13 @@ mod tests { fn serialization_roundtrip() { let circuit = Circuit { current_witness_index: 5, + expression_width: ExpressionWidth::Unbounded, opcodes: vec![and_opcode(), range_opcode()], private_parameters: BTreeSet::new(), public_parameters: PublicInputs(BTreeSet::from_iter(vec![Witness(2), Witness(12)])), return_values: PublicInputs(BTreeSet::from_iter(vec![Witness(4), Witness(12)])), assert_messages: Default::default(), + recursive: false, }; fn read_write(circuit: Circuit) -> (Circuit, Circuit) { @@ -334,6 +363,7 @@ mod tests { fn test_serialize() { let circuit = Circuit { current_witness_index: 0, + expression_width: ExpressionWidth::Unbounded, opcodes: vec![ Opcode::AssertZero(crate::native_types::Expression { mul_terms: vec![], @@ -348,6 +378,7 @@ mod tests { public_parameters: PublicInputs(BTreeSet::from_iter(vec![Witness(2)])), return_values: PublicInputs(BTreeSet::from_iter(vec![Witness(2)])), assert_messages: Default::default(), + recursive: false, }; let json = serde_json::to_string_pretty(&circuit).unwrap(); diff --git a/acvm-repo/acir/src/circuit/opcodes.rs b/acvm-repo/acir/src/circuit/opcodes.rs index 5aab9d4d472..f725ba8c32a 100644 --- a/acvm-repo/acir/src/circuit/opcodes.rs +++ b/acvm-repo/acir/src/circuit/opcodes.rs @@ -8,6 +8,7 @@ mod memory_operation; pub use black_box_function_call::{BlackBoxFuncCall, FunctionInput}; pub use memory_operation::{BlockId, MemOp}; +#[allow(clippy::large_enum_variant)] #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum Opcode { AssertZero(Expression), @@ -59,20 +60,6 @@ impl std::fmt::Display for Opcode { b.last().unwrap().witness_index(), ) } - Opcode::Directive(Directive::PermutationSort { inputs: a, tuple, bits, sort_by }) => { - write!(f, "DIR::PERMUTATIONSORT ")?; - write!( - f, - "(permutation size: {} {}-tuples, sort_by: {:#?}, bits: [_{}..._{}]))", - a.len(), - tuple, - sort_by, - // (Note): the bits do not have contiguous index but there are too many for display - bits.first().unwrap().witness_index(), - bits.last().unwrap().witness_index(), - ) - } - Opcode::Brillig(brillig) => { write!(f, "BRILLIG: ")?; writeln!(f, "inputs: {:?}", brillig.inputs)?; diff --git a/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs b/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs index ba4964c8912..8a0c4692282 100644 --- a/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs +++ b/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs @@ -120,7 +120,7 @@ pub enum BlackBoxFuncCall { rhs: u32, output: u32, }, - BigIntNeg { + BigIntSub { lhs: u32, rhs: u32, output: u32, @@ -193,7 +193,7 @@ impl BlackBoxFuncCall { BlackBoxFuncCall::Keccakf1600 { .. } => BlackBoxFunc::Keccakf1600, BlackBoxFuncCall::RecursiveAggregation { .. } => BlackBoxFunc::RecursiveAggregation, BlackBoxFuncCall::BigIntAdd { .. } => BlackBoxFunc::BigIntAdd, - BlackBoxFuncCall::BigIntNeg { .. } => BlackBoxFunc::BigIntNeg, + BlackBoxFuncCall::BigIntSub { .. } => BlackBoxFunc::BigIntSub, BlackBoxFuncCall::BigIntMul { .. } => BlackBoxFunc::BigIntMul, BlackBoxFuncCall::BigIntDiv { .. } => BlackBoxFunc::BigIntDiv, BlackBoxFuncCall::BigIntFromLeBytes { .. 
} => BlackBoxFunc::BigIntFromLeBytes, @@ -217,13 +217,15 @@ impl BlackBoxFuncCall { | BlackBoxFuncCall::PedersenCommitment { inputs, .. } | BlackBoxFuncCall::PedersenHash { inputs, .. } | BlackBoxFuncCall::BigIntFromLeBytes { inputs, .. } - | BlackBoxFuncCall::Poseidon2Permutation { inputs, .. } - | BlackBoxFuncCall::Sha256Compression { inputs, .. } => inputs.to_vec(), + | BlackBoxFuncCall::Poseidon2Permutation { inputs, .. } => inputs.to_vec(), + BlackBoxFuncCall::Sha256Compression { inputs, hash_values, .. } => { + inputs.iter().chain(hash_values).copied().collect() + } BlackBoxFuncCall::AND { lhs, rhs, .. } | BlackBoxFuncCall::XOR { lhs, rhs, .. } => { vec![*lhs, *rhs] } BlackBoxFuncCall::BigIntAdd { .. } - | BlackBoxFuncCall::BigIntNeg { .. } + | BlackBoxFuncCall::BigIntSub { .. } | BlackBoxFuncCall::BigIntMul { .. } | BlackBoxFuncCall::BigIntDiv { .. } | BlackBoxFuncCall::BigIntToLeBytes { .. } => Vec::new(), @@ -328,7 +330,7 @@ impl BlackBoxFuncCall { | BlackBoxFuncCall::RecursiveAggregation { .. } | BlackBoxFuncCall::BigIntFromLeBytes { .. } | BlackBoxFuncCall::BigIntAdd { .. } - | BlackBoxFuncCall::BigIntNeg { .. } + | BlackBoxFuncCall::BigIntSub { .. } | BlackBoxFuncCall::BigIntMul { .. } | BlackBoxFuncCall::BigIntDiv { .. } => { vec![] diff --git a/acvm-repo/acir/src/circuit/opcodes/memory_operation.rs b/acvm-repo/acir/src/circuit/opcodes/memory_operation.rs index 9e45dc4ee8c..0e94c0f051e 100644 --- a/acvm-repo/acir/src/circuit/opcodes/memory_operation.rs +++ b/acvm-repo/acir/src/circuit/opcodes/memory_operation.rs @@ -1,7 +1,7 @@ use crate::native_types::{Expression, Witness}; use serde::{Deserialize, Serialize}; -#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Copy, Default)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Hash, Copy, Default)] pub struct BlockId(pub u32); /// Operation on a block of memory diff --git a/acvm-repo/acir/src/lib.rs b/acvm-repo/acir/src/lib.rs index b7bcaa0c5c0..c7be5026850 100644 --- a/acvm-repo/acir/src/lib.rs +++ b/acvm-repo/acir/src/lib.rs @@ -32,7 +32,8 @@ mod reflection { }; use brillig::{ - BinaryFieldOp, BinaryIntOp, BlackBoxOp, Opcode as BrilligOpcode, RegisterOrMemory, + BinaryFieldOp, BinaryIntOp, BlackBoxOp, HeapValueType, Opcode as BrilligOpcode, + ValueOrArray, }; use serde_reflection::{Tracer, TracerConfig}; @@ -41,7 +42,7 @@ mod reflection { brillig::{BrilligInputs, BrilligOutputs}, directives::Directive, opcodes::BlackBoxFuncCall, - Circuit, Opcode, OpcodeLocation, + Circuit, ExpressionWidth, Opcode, OpcodeLocation, }, native_types::{Witness, WitnessMap}, }; @@ -59,6 +60,7 @@ mod reflection { let mut tracer = Tracer::new(TracerConfig::default()); tracer.trace_simple_type::().unwrap(); + tracer.trace_simple_type::().unwrap(); tracer.trace_simple_type::().unwrap(); tracer.trace_simple_type::().unwrap(); tracer.trace_simple_type::().unwrap(); @@ -69,7 +71,8 @@ mod reflection { tracer.trace_simple_type::().unwrap(); tracer.trace_simple_type::().unwrap(); tracer.trace_simple_type::().unwrap(); - tracer.trace_simple_type::().unwrap(); + tracer.trace_simple_type::().unwrap(); + tracer.trace_simple_type::().unwrap(); let registry = tracer.registry().unwrap(); diff --git a/acvm-repo/acir/tests/test_program_serialization.rs b/acvm-repo/acir/tests/test_program_serialization.rs index 7d3b7b32d35..2c8ad2b9986 100644 --- a/acvm-repo/acir/tests/test_program_serialization.rs +++ b/acvm-repo/acir/tests/test_program_serialization.rs @@ -20,7 +20,7 @@ use acir::{ native_types::{Expression, Witness}, }; use 
acir_field::FieldElement; -use brillig::{HeapArray, RegisterIndex, RegisterOrMemory}; +use brillig::{HeapArray, HeapValueType, MemoryAddress, ValueOrArray}; #[test] fn addition_circuit() { @@ -45,12 +45,12 @@ fn addition_circuit() { let bytes = Circuit::serialize_circuit(&circuit); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 187, 13, 192, 32, 12, 68, 249, 100, 32, 27, - 219, 96, 119, 89, 37, 40, 176, 255, 8, 17, 18, 5, 74, 202, 240, 154, 235, 158, 238, 238, - 112, 206, 121, 247, 37, 206, 60, 103, 194, 63, 208, 111, 116, 133, 197, 69, 144, 153, 91, - 73, 13, 9, 47, 72, 86, 85, 128, 165, 102, 69, 69, 81, 185, 147, 18, 53, 101, 45, 86, 173, - 128, 33, 83, 195, 46, 70, 125, 202, 226, 190, 94, 16, 166, 103, 108, 13, 203, 151, 254, - 245, 233, 224, 1, 1, 52, 166, 127, 120, 1, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 208, 49, 14, 192, 32, 8, 5, 80, 212, 30, 8, 4, 20, + 182, 94, 165, 166, 122, 255, 35, 52, 77, 28, 76, 58, 214, 191, 124, 166, 23, 242, 15, 0, 8, + 240, 77, 154, 125, 206, 198, 127, 161, 176, 209, 138, 139, 197, 88, 68, 122, 205, 157, 152, + 46, 204, 222, 76, 81, 180, 21, 35, 35, 53, 189, 179, 49, 119, 19, 171, 222, 188, 162, 147, + 112, 167, 161, 206, 99, 98, 105, 223, 95, 248, 26, 113, 90, 97, 185, 97, 217, 56, 173, 35, + 63, 243, 81, 87, 163, 125, 1, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -75,9 +75,9 @@ fn fixed_base_scalar_mul_circuit() { let bytes = Circuit::serialize_circuit(&circuit); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 138, 91, 10, 0, 48, 12, 194, 178, 215, 215, 46, 189, - 163, 175, 165, 10, 21, 36, 10, 57, 192, 160, 146, 188, 226, 139, 78, 113, 69, 183, 190, 61, - 111, 218, 182, 231, 124, 122, 8, 177, 65, 92, 0, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 138, 91, 10, 0, 32, 16, 2, 109, 171, 175, 46, 221, + 209, 247, 229, 130, 130, 140, 200, 92, 0, 11, 157, 228, 35, 127, 212, 200, 29, 61, 116, 76, + 220, 217, 250, 171, 91, 113, 160, 66, 104, 242, 97, 0, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -102,9 +102,9 @@ fn pedersen_circuit() { let bytes = Circuit::serialize_circuit(&circuit); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 138, 9, 10, 0, 64, 8, 2, 103, 15, 232, 255, 31, 142, - 138, 10, 34, 65, 84, 198, 15, 28, 82, 145, 178, 182, 86, 191, 238, 183, 24, 131, 205, 79, - 203, 0, 166, 242, 158, 93, 92, 0, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 74, 135, 9, 0, 48, 8, 75, 171, 224, 255, 15, 139, + 27, 196, 64, 200, 100, 0, 15, 133, 80, 57, 89, 219, 127, 39, 173, 126, 235, 236, 247, 151, + 48, 224, 71, 90, 33, 97, 0, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -143,22 +143,22 @@ fn schnorr_verify_circuit() { let bytes = Circuit::serialize_circuit(&circuit); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 210, 87, 78, 2, 1, 20, 134, 209, 177, 247, 222, 123, - 71, 68, 68, 68, 68, 68, 68, 68, 68, 68, 221, 133, 251, 95, 130, 145, 27, 206, 36, 78, 50, - 57, 16, 94, 200, 253, 191, 159, 36, 73, 134, 146, 193, 19, 142, 243, 183, 255, 14, 179, - 233, 247, 145, 254, 59, 217, 127, 71, 57, 198, 113, 78, 48, 125, 167, 56, 205, 25, 206, - 114, 142, 243, 92, 224, 34, 151, 184, 204, 21, 174, 114, 141, 235, 220, 224, 38, 183, 184, - 205, 29, 238, 114, 143, 251, 60, 224, 33, 143, 120, 204, 19, 158, 242, 140, 25, 158, 51, - 203, 11, 230, 120, 201, 60, 175, 88, 224, 53, 139, 188, 97, 137, 183, 44, 243, 142, 21, - 222, 179, 202, 7, 214, 248, 200, 58, 159, 216, 224, 51, 155, 124, 97, 235, 223, 
142, 241, - 188, 250, 222, 230, 27, 59, 124, 103, 151, 31, 236, 241, 147, 95, 252, 246, 57, 158, 104, - 47, 186, 139, 214, 162, 179, 104, 44, 250, 74, 219, 154, 242, 63, 162, 165, 232, 40, 26, - 138, 126, 162, 157, 232, 38, 154, 137, 94, 162, 149, 232, 36, 26, 137, 62, 162, 141, 232, - 34, 154, 136, 30, 162, 133, 232, 32, 26, 136, 253, 99, 251, 195, 100, 176, 121, 236, 29, - 91, 159, 218, 56, 99, 219, 172, 77, 115, 182, 204, 219, 176, 96, 187, 162, 205, 74, 182, - 42, 219, 168, 98, 155, 170, 77, 106, 182, 168, 219, 160, 225, 246, 77, 55, 111, 185, 113, - 219, 109, 59, 110, 218, 117, 203, 158, 27, 166, 55, 75, 239, 150, 184, 101, 250, 252, 1, - 55, 204, 92, 74, 220, 3, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 210, 7, 74, 3, 1, 20, 69, 209, 177, 247, 222, 123, + 239, 189, 119, 141, 93, 99, 220, 133, 251, 95, 130, 152, 103, 78, 32, 3, 195, 33, 4, 66, + 248, 239, 254, 20, 69, 209, 84, 212, 158, 216, 206, 223, 234, 219, 204, 146, 239, 91, 170, + 111, 103, 245, 109, 101, 27, 219, 217, 193, 250, 219, 197, 110, 246, 176, 151, 125, 236, + 231, 0, 7, 57, 196, 97, 142, 112, 148, 99, 28, 231, 4, 39, 57, 197, 105, 206, 112, 150, + 115, 156, 231, 2, 23, 185, 196, 101, 174, 112, 149, 107, 92, 231, 6, 55, 185, 197, 109, + 238, 112, 151, 123, 220, 231, 1, 15, 121, 196, 99, 158, 240, 148, 103, 60, 231, 5, 47, 121, + 197, 107, 222, 240, 150, 119, 188, 231, 3, 75, 124, 228, 83, 195, 142, 121, 158, 125, 126, + 225, 43, 223, 248, 206, 15, 126, 178, 204, 47, 86, 248, 237, 119, 43, 76, 127, 105, 47, + 189, 165, 181, 116, 150, 198, 234, 125, 117, 249, 47, 233, 41, 45, 165, 163, 52, 148, 126, + 210, 78, 186, 73, 51, 233, 37, 173, 164, 147, 52, 146, 62, 210, 70, 186, 72, 19, 233, 33, + 45, 164, 131, 52, 144, 253, 23, 139, 218, 238, 217, 60, 123, 103, 235, 236, 156, 141, 179, + 239, 166, 93, 183, 237, 185, 107, 199, 125, 251, 29, 218, 237, 216, 94, 167, 118, 58, 183, + 207, 165, 93, 174, 237, 113, 107, 135, 123, 247, 47, 185, 251, 147, 59, 191, 184, 239, 155, + 187, 126, 184, 103, 217, 29, 235, 55, 171, 223, 173, 104, 184, 231, 255, 243, 7, 236, 52, + 239, 128, 225, 3, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -173,15 +173,25 @@ fn simple_brillig_foreign_call() { inputs: vec![ BrilligInputs::Single(w_input.into()), // Input Register 0, ], - // This tells the BrilligSolver which witnesses its output registers correspond to + // This tells the BrilligSolver which witnesses its output values correspond to outputs: vec![ BrilligOutputs::Simple(w_inverted), // Output Register 1 ], - bytecode: vec![brillig::Opcode::ForeignCall { - function: "invert".into(), - destinations: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(0))], - inputs: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(0))], - }], + bytecode: vec![ + brillig::Opcode::CalldataCopy { + destination_address: MemoryAddress(0), + size: 1, + offset: 0, + }, + brillig::Opcode::ForeignCall { + function: "invert".into(), + destinations: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(0))], + destination_value_types: vec![HeapValueType::Simple], + inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(0))], + input_value_types: vec![HeapValueType::Simple], + }, + brillig::Opcode::Stop { return_data_offset: 0, return_data_size: 1 }, + ], predicate: None, }; @@ -196,10 +206,11 @@ fn simple_brillig_foreign_call() { let bytes = Circuit::serialize_circuit(&circuit); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 143, 49, 10, 64, 33, 12, 67, 99, 63, 124, 60, 142, - 222, 192, 203, 56, 184, 56, 
136, 120, 126, 5, 21, 226, 160, 139, 62, 40, 13, 45, 132, 68, - 3, 80, 232, 124, 164, 153, 121, 115, 99, 155, 59, 172, 122, 231, 101, 56, 175, 80, 86, 221, - 230, 31, 58, 196, 226, 83, 62, 53, 91, 16, 122, 10, 246, 84, 99, 243, 0, 30, 59, 1, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 143, 177, 10, 192, 32, 16, 67, 227, 21, 74, 233, + 212, 79, 177, 127, 208, 159, 233, 224, 226, 32, 226, 247, 139, 168, 16, 68, 93, 244, 45, + 119, 228, 142, 144, 92, 0, 20, 50, 7, 237, 76, 213, 190, 50, 245, 26, 175, 218, 231, 165, + 57, 175, 148, 14, 137, 179, 147, 191, 114, 211, 221, 216, 240, 59, 63, 107, 221, 115, 104, + 181, 103, 244, 43, 36, 10, 38, 68, 108, 25, 253, 238, 136, 1, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -221,39 +232,64 @@ fn complex_brillig_foreign_call() { let brillig_data = Brillig { inputs: vec![ - // Input Register 0 + // Input 0,1,2 BrilligInputs::Array(vec![ Expression::from(a), Expression::from(b), Expression::from(c), ]), - // Input Register 1 + // Input 3 BrilligInputs::Single(Expression { mul_terms: vec![], linear_combinations: vec![(fe_1, a), (fe_1, b), (fe_1, c)], q_c: fe_0, }), ], - // This tells the BrilligSolver which witnesses its output registers correspond to + // This tells the BrilligSolver which witnesses its output values correspond to outputs: vec![ - BrilligOutputs::Array(vec![a_times_2, b_times_3, c_times_4]), // Output Register 0 - BrilligOutputs::Simple(a_plus_b_plus_c), // Output Register 1 - BrilligOutputs::Simple(a_plus_b_plus_c_times_2), // Output Register 2 + BrilligOutputs::Array(vec![a_times_2, b_times_3, c_times_4]), // Output 0,1,2 + BrilligOutputs::Simple(a_plus_b_plus_c), // Output 3 + BrilligOutputs::Simple(a_plus_b_plus_c_times_2), // Output 4 ], bytecode: vec![ + brillig::Opcode::CalldataCopy { + destination_address: MemoryAddress(32), + size: 3, + offset: 0, + }, + brillig::Opcode::Const { + destination: MemoryAddress(0), + value: brillig::Value::from(32_usize), + bit_size: 32, + }, + brillig::Opcode::CalldataCopy { + destination_address: MemoryAddress(1), + size: 1, + offset: 3, + }, // Oracles are named 'foreign calls' in brillig brillig::Opcode::ForeignCall { function: "complex".into(), inputs: vec![ - RegisterOrMemory::HeapArray(HeapArray { pointer: 0.into(), size: 3 }), - RegisterOrMemory::RegisterIndex(RegisterIndex::from(1)), + ValueOrArray::HeapArray(HeapArray { pointer: 0.into(), size: 3 }), + ValueOrArray::MemoryAddress(MemoryAddress::from(1)), + ], + input_value_types: vec![ + HeapValueType::Array { size: 3, value_types: vec![HeapValueType::Simple] }, + HeapValueType::Simple, ], destinations: vec![ - RegisterOrMemory::HeapArray(HeapArray { pointer: 0.into(), size: 3 }), - RegisterOrMemory::RegisterIndex(RegisterIndex::from(1)), - RegisterOrMemory::RegisterIndex(RegisterIndex::from(2)), + ValueOrArray::HeapArray(HeapArray { pointer: 0.into(), size: 3 }), + ValueOrArray::MemoryAddress(MemoryAddress::from(35)), + ValueOrArray::MemoryAddress(MemoryAddress::from(36)), + ], + destination_value_types: vec![ + HeapValueType::Array { size: 3, value_types: vec![HeapValueType::Simple] }, + HeapValueType::Simple, + HeapValueType::Simple, ], }, + brillig::Opcode::Stop { return_data_offset: 32, return_data_size: 5 }, ], predicate: None, }; @@ -269,13 +305,15 @@ fn complex_brillig_foreign_call() { let bytes = Circuit::serialize_circuit(&circuit); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 83, 219, 10, 128, 48, 8, 117, 174, 139, 159, 179, - 254, 160, 127, 137, 222, 138, 122, 236, 243, 19, 114, 
32, 22, 244, 144, 131, 118, 64, 156, - 178, 29, 14, 59, 74, 0, 16, 224, 66, 228, 64, 57, 7, 169, 53, 242, 189, 81, 114, 250, 134, - 33, 248, 113, 165, 82, 26, 177, 2, 141, 177, 128, 198, 60, 15, 63, 245, 219, 211, 23, 215, - 255, 139, 15, 251, 211, 112, 180, 28, 157, 212, 189, 100, 82, 179, 64, 170, 63, 109, 235, - 190, 204, 135, 166, 178, 150, 216, 62, 154, 252, 250, 70, 147, 35, 220, 119, 93, 227, 4, - 182, 131, 81, 25, 36, 4, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 84, 75, 10, 132, 48, 12, 125, 177, 163, 35, 179, + 154, 35, 8, 51, 7, 232, 204, 9, 188, 139, 184, 83, 116, 233, 241, 173, 152, 98, 12, 213, + 141, 21, 244, 65, 232, 39, 175, 233, 35, 73, 155, 3, 32, 204, 48, 206, 18, 158, 19, 175, + 37, 60, 175, 228, 209, 30, 195, 143, 226, 197, 178, 103, 105, 76, 110, 160, 209, 156, 160, + 209, 247, 195, 69, 235, 29, 179, 46, 81, 243, 103, 2, 239, 231, 225, 44, 117, 150, 241, + 250, 201, 99, 206, 251, 96, 95, 161, 242, 14, 193, 243, 40, 162, 105, 253, 219, 12, 75, 47, + 146, 186, 251, 37, 116, 86, 93, 219, 55, 245, 96, 20, 85, 75, 253, 136, 249, 87, 249, 105, + 231, 220, 4, 249, 237, 132, 56, 20, 224, 109, 113, 223, 88, 82, 153, 34, 64, 34, 14, 164, + 69, 172, 48, 2, 23, 243, 6, 31, 25, 5, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -307,11 +345,10 @@ fn memory_op_circuit() { let bytes = Circuit::serialize_circuit(&circuit); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 146, 49, 14, 0, 32, 8, 3, 139, 192, 127, 240, 7, - 254, 255, 85, 198, 136, 9, 131, 155, 48, 216, 165, 76, 77, 57, 80, 0, 140, 45, 117, 111, - 238, 228, 179, 224, 174, 225, 110, 111, 234, 213, 185, 148, 156, 203, 121, 89, 86, 13, 215, - 126, 131, 43, 153, 187, 115, 40, 185, 62, 153, 3, 136, 83, 60, 30, 96, 2, 12, 235, 225, - 124, 14, 3, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 145, 187, 17, 0, 32, 8, 67, 195, 111, 31, 220, 192, + 253, 167, 178, 144, 2, 239, 236, 132, 194, 52, 129, 230, 93, 8, 6, 64, 176, 101, 225, 28, + 78, 49, 43, 238, 154, 225, 254, 166, 209, 205, 165, 98, 174, 212, 177, 188, 187, 92, 255, + 173, 92, 173, 190, 93, 82, 80, 78, 123, 14, 127, 60, 97, 1, 210, 144, 46, 242, 19, 3, 0, 0, ]; assert_eq!(bytes, expected_serialization) diff --git a/acvm-repo/acir_field/Cargo.toml b/acvm-repo/acir_field/Cargo.toml index dde121f4029..6f4971770bd 100644 --- a/acvm-repo/acir_field/Cargo.toml +++ b/acvm-repo/acir_field/Cargo.toml @@ -2,7 +2,7 @@ name = "acir_field" description = "The field implementation being used by ACIR." 
# x-release-please-start-version -version = "0.39.0" +version = "0.40.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/acir_field/src/generic_ark.rs b/acvm-repo/acir_field/src/generic_ark.rs index 542e291982b..3178011a075 100644 --- a/acvm-repo/acir_field/src/generic_ark.rs +++ b/acvm-repo/acir_field/src/generic_ark.rs @@ -175,6 +175,10 @@ impl FieldElement { self == &Self::one() } + pub fn is_negative(&self) -> bool { + self.neg().num_bits() < self.num_bits() + } + pub fn pow(&self, exponent: &Self) -> Self { FieldElement(self.0.pow(exponent.0.into_bigint())) } @@ -240,6 +244,12 @@ impl FieldElement { self.fits_in_u128().then(|| self.to_u128()) } + pub fn to_i128(self) -> i128 { + let is_negative = self.is_negative(); + let bytes = if is_negative { self.neg() } else { self }.to_be_bytes(); + i128::from_be_bytes(bytes[16..32].try_into().unwrap()) * if is_negative { -1 } else { 1 } + } + pub fn try_to_u64(&self) -> Option { (self.num_bits() <= 64).then(|| self.to_u128() as u64) } @@ -419,63 +429,6 @@ impl SubAssign for FieldElement { } } -#[cfg(test)] -mod tests { - #[test] - fn and() { - let max = 10_000u32; - - let num_bits = (std::mem::size_of::() * 8) as u32 - max.leading_zeros(); - - for x in 0..max { - let x = crate::generic_ark::FieldElement::::from(x as i128); - let res = x.and(&x, num_bits); - assert_eq!(res.to_be_bytes(), x.to_be_bytes()); - } - } - - #[test] - fn serialize_fixed_test_vectors() { - // Serialized field elements from of 0, -1, -2, -3 - let hex_strings = vec![ - "0000000000000000000000000000000000000000000000000000000000000000", - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000000", - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593efffffff", - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593effffffe", - ]; - - for (i, string) in hex_strings.into_iter().enumerate() { - let minus_i_field_element = - -crate::generic_ark::FieldElement::::from(i as i128); - assert_eq!(minus_i_field_element.to_hex(), string); - } - } - - #[test] - fn deserialize_even_and_odd_length_hex() { - // Test cases of (odd, even) length hex strings - let hex_strings = - vec![("0x0", "0x00"), ("0x1", "0x01"), ("0x002", "0x0002"), ("0x00003", "0x000003")]; - for (i, case) in hex_strings.into_iter().enumerate() { - let i_field_element = - crate::generic_ark::FieldElement::::from(i as i128); - let odd_field_element = - crate::generic_ark::FieldElement::::from_hex(case.0).unwrap(); - let even_field_element = - crate::generic_ark::FieldElement::::from_hex(case.1).unwrap(); - - assert_eq!(i_field_element, odd_field_element); - assert_eq!(odd_field_element, even_field_element); - } - } - - #[test] - fn max_num_bits_smoke() { - let max_num_bits_bn254 = crate::generic_ark::FieldElement::::max_num_bits(); - assert_eq!(max_num_bits_bn254, 254); - } -} - fn mask_vector_le(bytes: &mut [u8], num_bits: usize) { // reverse to big endian format bytes.reverse(); @@ -533,3 +486,60 @@ fn superscript(n: u64) -> String { panic!("{}", n.to_string() + " can't be converted to superscript."); } } + +#[cfg(test)] +mod tests { + #[test] + fn and() { + let max = 10_000u32; + + let num_bits = (std::mem::size_of::() * 8) as u32 - max.leading_zeros(); + + for x in 0..max { + let x = crate::generic_ark::FieldElement::::from(x as i128); + let res = x.and(&x, num_bits); + assert_eq!(res.to_be_bytes(), x.to_be_bytes()); + } + } + + #[test] + fn serialize_fixed_test_vectors() { + // Serialized field elements from of 0, -1, -2, -3 + let hex_strings = 
vec![ + "0000000000000000000000000000000000000000000000000000000000000000", + "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000000", + "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593efffffff", + "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593effffffe", + ]; + + for (i, string) in hex_strings.into_iter().enumerate() { + let minus_i_field_element = + -crate::generic_ark::FieldElement::::from(i as i128); + assert_eq!(minus_i_field_element.to_hex(), string); + } + } + + #[test] + fn deserialize_even_and_odd_length_hex() { + // Test cases of (odd, even) length hex strings + let hex_strings = + vec![("0x0", "0x00"), ("0x1", "0x01"), ("0x002", "0x0002"), ("0x00003", "0x000003")]; + for (i, case) in hex_strings.into_iter().enumerate() { + let i_field_element = + crate::generic_ark::FieldElement::::from(i as i128); + let odd_field_element = + crate::generic_ark::FieldElement::::from_hex(case.0).unwrap(); + let even_field_element = + crate::generic_ark::FieldElement::::from_hex(case.1).unwrap(); + + assert_eq!(i_field_element, odd_field_element); + assert_eq!(odd_field_element, even_field_element); + } + } + + #[test] + fn max_num_bits_smoke() { + let max_num_bits_bn254 = crate::generic_ark::FieldElement::::max_num_bits(); + assert_eq!(max_num_bits_bn254, 254); + } +} diff --git a/acvm-repo/acvm/Cargo.toml b/acvm-repo/acvm/Cargo.toml index a40148a01ef..fce9a8e8e8b 100644 --- a/acvm-repo/acvm/Cargo.toml +++ b/acvm-repo/acvm/Cargo.toml @@ -2,7 +2,7 @@ name = "acvm" description = "The virtual machine that processes ACIR given a backend/proof system." # x-release-please-start-version -version = "0.39.0" +version = "0.40.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/acvm/src/compiler/mod.rs b/acvm-repo/acvm/src/compiler/mod.rs index ccb043914d6..6543c70958b 100644 --- a/acvm-repo/acvm/src/compiler/mod.rs +++ b/acvm-repo/acvm/src/compiler/mod.rs @@ -1,8 +1,6 @@ use std::collections::HashMap; -use acir::circuit::{Circuit, OpcodeLocation}; - -use crate::ExpressionWidth; +use acir::circuit::{Circuit, ExpressionWidth, OpcodeLocation}; // The various passes that we can use over ACIR mod optimizers; diff --git a/acvm-repo/acvm/src/compiler/optimizers/constant_backpropagation.rs b/acvm-repo/acvm/src/compiler/optimizers/constant_backpropagation.rs new file mode 100644 index 00000000000..0e7d28104da --- /dev/null +++ b/acvm-repo/acvm/src/compiler/optimizers/constant_backpropagation.rs @@ -0,0 +1,331 @@ +use std::collections::{BTreeMap, BTreeSet, HashMap}; + +use crate::{ + compiler::optimizers::GeneralOptimizer, + pwg::{ + arithmetic::ExpressionSolver, blackbox::solve_range_opcode, directives::solve_directives, + BrilligSolver, BrilligSolverStatus, + }, +}; +use acir::{ + circuit::{ + brillig::{Brillig, BrilligInputs, BrilligOutputs}, + directives::Directive, + opcodes::BlackBoxFuncCall, + Circuit, Opcode, + }, + native_types::{Expression, Witness, WitnessMap}, +}; +use acvm_blackbox_solver::StubbedBlackBoxSolver; + +/// `ConstantBackpropagationOptimizer` will attempt to determine any constant witnesses within the program. +/// It does this by attempting to solve the program without any inputs (i.e. using an empty witness map), +/// any values which it can determine are then enforced to be constant values. +/// +/// The optimizer will then replace any witnesses wherever they appear within the circuit with these constant values. +/// This is repeated until the circuit stabilizes. 
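Before the struct itself, a minimal standalone sketch of the idea this pass implements. The types below (`ToyConstraint`, `backpropagate`) are invented for illustration only; the real optimizer operates on ACIR `Expression`s and a `WitnessMap`, and additionally handles Brillig, directive and black-box opcodes as shown further down.

use std::collections::HashMap;

// Toy constraint: sum(coefficient_i * witness_i) + constant == 0.
struct ToyConstraint {
    terms: Vec<(i64, usize)>, // (coefficient, witness index)
    constant: i64,
}

// Repeatedly substitute known witnesses and pick up any witness that a
// constraint now forces to a single value, until nothing new is learned.
fn backpropagate(mut constraints: Vec<ToyConstraint>) -> HashMap<usize, i64> {
    let mut known: HashMap<usize, i64> = HashMap::new();
    loop {
        let mut progressed = false;
        for constraint in &mut constraints {
            // Fold already-known witnesses into the constant term.
            constraint.constant += constraint
                .terms
                .iter()
                .filter_map(|(coeff, w)| known.get(w).map(|value| coeff * value))
                .sum::<i64>();
            constraint.terms.retain(|(_, w)| !known.contains_key(w));
            // A single remaining term pins its witness (toy integer division,
            // standing in for exact field division).
            if constraint.terms.len() == 1 {
                let (coeff, w) = constraint.terms[0];
                known.insert(w, -constraint.constant / coeff);
                constraint.terms.clear();
                progressed = true;
            }
        }
        if !progressed {
            break known;
        }
    }
}

fn main() {
    // w0 - 5 == 0 and w1 - w0 - 1 == 0  =>  w0 = 5, w1 = 6.
    let constraints = vec![
        ToyConstraint { terms: vec![(1, 0)], constant: -5 },
        ToyConstraint { terms: vec![(1, 1), (-1, 0)], constant: -1 },
    ];
    assert_eq!(backpropagate(constraints).get(&1), Some(&6));
}

The real pass also refuses to forget witnesses that belong to the circuit's interface or are referenced by memory and black-box opcodes, as `gather_known_witnesses` below shows.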
+pub(crate) struct ConstantBackpropagationOptimizer { + circuit: Circuit, +} + +impl ConstantBackpropagationOptimizer { + /// Creates a new `ConstantBackpropagationOptimizer` + pub(crate) fn new(circuit: Circuit) -> Self { + Self { circuit } + } + + fn gather_known_witnesses(&self) -> (WitnessMap, BTreeSet) { + // We do not want to affect the circuit's interface so avoid optimizing away these witnesses. + let mut required_witnesses: BTreeSet = self + .circuit + .private_parameters + .union(&self.circuit.public_parameters.0) + .chain(&self.circuit.return_values.0) + .copied() + .collect(); + + for opcode in &self.circuit.opcodes { + match &opcode { + Opcode::BlackBoxFuncCall(func_call) => { + required_witnesses.extend( + func_call.get_inputs_vec().into_iter().map(|func_input| func_input.witness), + ); + required_witnesses.extend(func_call.get_outputs_vec()); + } + + Opcode::MemoryInit { init, .. } => { + required_witnesses.extend(init); + } + + Opcode::MemoryOp { op, .. } => { + required_witnesses.insert(op.index.to_witness().unwrap()); + required_witnesses.insert(op.value.to_witness().unwrap()); + } + + _ => (), + }; + } + + let mut known_witnesses = WitnessMap::new(); + for opcode in self.circuit.opcodes.iter().rev() { + if let Opcode::AssertZero(expr) = opcode { + let solve_result = ExpressionSolver::solve(&mut known_witnesses, expr); + // It doesn't matter what the result is. We expect most opcodes to not be solved successfully so we discard errors. + // At the same time, if the expression can be solved then we track this by the updates to `known_witnesses` + drop(solve_result); + } + } + + // We want to retain any references to required witnesses so we "forget" these assignments. + let known_witnesses: BTreeMap<_, _> = known_witnesses + .into_iter() + .filter(|(witness, _)| !required_witnesses.contains(witness)) + .collect(); + + (known_witnesses.into(), required_witnesses) + } + + /// Returns a `Circuit` where with any constant witnesses replaced with the constant they resolve to. + #[tracing::instrument(level = "trace", skip_all)] + pub(crate) fn backpropagate_constants( + circuit: Circuit, + order_list: Vec, + ) -> (Circuit, Vec) { + let old_circuit_size = circuit.opcodes.len(); + + let optimizer = Self::new(circuit); + let (circuit, order_list) = optimizer.backpropagate_constants_iteration(order_list); + + let new_circuit_size = circuit.opcodes.len(); + if new_circuit_size < old_circuit_size { + Self::backpropagate_constants(circuit, order_list) + } else { + (circuit, order_list) + } + } + + /// Applies a single round of constant backpropagation to a `Circuit`. + pub(crate) fn backpropagate_constants_iteration( + mut self, + order_list: Vec, + ) -> (Circuit, Vec) { + let (mut known_witnesses, required_witnesses) = self.gather_known_witnesses(); + + let opcodes = std::mem::take(&mut self.circuit.opcodes); + + fn remap_expression(known_witnesses: &WitnessMap, expression: Expression) -> Expression { + GeneralOptimizer::optimize(ExpressionSolver::evaluate(&expression, known_witnesses)) + } + + let mut new_order_list = Vec::with_capacity(order_list.len()); + let mut new_opcodes = Vec::with_capacity(opcodes.len()); + for (idx, opcode) in opcodes.into_iter().enumerate() { + let new_opcode = match opcode { + Opcode::AssertZero(expression) => { + let new_expr = remap_expression(&known_witnesses, expression); + if new_expr.is_zero() { + continue; + } + + // Attempt to solve the opcode to see if we can determine the value of any witnesses in the expression. 
+ // We only do this _after_ we apply any simplifications to create the new opcode as we want to + // keep the constraint on the witness which we are solving for here. + let solve_result = ExpressionSolver::solve(&mut known_witnesses, &new_expr); + // It doesn't matter what the result is. We expect most opcodes to not be solved successfully so we discard errors. + // At the same time, if the expression can be solved then we track this by the updates to `known_witnesses` + drop(solve_result); + + Opcode::AssertZero(new_expr) + } + Opcode::Brillig(brillig) => { + let remapped_inputs = brillig + .inputs + .into_iter() + .map(|input| match input { + BrilligInputs::Single(expr) => { + BrilligInputs::Single(remap_expression(&known_witnesses, expr)) + } + BrilligInputs::Array(expr_array) => { + let new_input: Vec<_> = expr_array + .into_iter() + .map(|expr| remap_expression(&known_witnesses, expr)) + .collect(); + + BrilligInputs::Array(new_input) + } + input @ BrilligInputs::MemoryArray(_) => input, + }) + .collect(); + + let remapped_predicate = brillig + .predicate + .map(|predicate| remap_expression(&known_witnesses, predicate)); + + let new_brillig = Brillig { + inputs: remapped_inputs, + predicate: remapped_predicate, + ..brillig + }; + + let brillig_output_is_required_witness = + new_brillig.outputs.iter().any(|output| match output { + BrilligOutputs::Simple(witness) => required_witnesses.contains(witness), + BrilligOutputs::Array(witness_array) => witness_array + .iter() + .any(|witness| required_witnesses.contains(witness)), + }); + + if brillig_output_is_required_witness { + // If one of the brillig opcode's outputs is a required witness then we can't remove the opcode. In this case we can't replace + // all of the uses of this witness with the calculated constant so we'll be attempting to use an uninitialized witness. + // + // We then do not attempt execution of this opcode and just simplify the inputs. + Opcode::Brillig(new_brillig) + } else if let Ok(mut solver) = BrilligSolver::new( + &known_witnesses, + &HashMap::new(), + &new_brillig, + &StubbedBlackBoxSolver, + idx, + ) { + match solver.solve() { + Ok(BrilligSolverStatus::Finished) => { + // Write execution outputs + match solver.finalize(&mut known_witnesses, &new_brillig) { + Ok(()) => { + // If we've managed to execute the brillig opcode at compile time, we can now just write in the + // results as constants for the rest of the circuit. + continue; + } + _ => Opcode::Brillig(new_brillig), + } + } + Ok(BrilligSolverStatus::InProgress) => unreachable!( + "Solver should either finish, block on foreign call, or error." + ), + Ok(BrilligSolverStatus::ForeignCallWait(_)) | Err(_) => { + Opcode::Brillig(new_brillig) + } + } + } else { + Opcode::Brillig(new_brillig) + } + } + + Opcode::Directive(Directive::ToLeRadix { a, b, radix }) => { + if b.iter().all(|output| known_witnesses.contains_key(output)) { + continue; + } else if b.iter().any(|witness| required_witnesses.contains(witness)) { + // If one of the brillig opcode's outputs is a required witness then we can't remove the opcode. In this case we can't replace + // all of the uses of this witness with the calculated constant so we'll be attempting to use an uninitialized witness. + // + // We then do not attempt execution of this opcode and just simplify the inputs. 
+ Opcode::Directive(Directive::ToLeRadix { + a: remap_expression(&known_witnesses, a), + b, + radix, + }) + } else { + let directive = Directive::ToLeRadix { + a: remap_expression(&known_witnesses, a), + b, + radix, + }; + let result = solve_directives(&mut known_witnesses, &directive); + + match result { + Ok(()) => continue, + Err(_) => Opcode::Directive(directive), + } + } + } + + Opcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { input }) => { + if solve_range_opcode(&known_witnesses, &input).is_ok() { + continue; + } else { + opcode + } + } + + Opcode::BlackBoxFuncCall(_) + | Opcode::MemoryOp { .. } + | Opcode::MemoryInit { .. } => opcode, + }; + + new_opcodes.push(new_opcode); + new_order_list.push(order_list[idx]); + } + + self.circuit.opcodes = new_opcodes; + + (self.circuit, new_order_list) + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + + use crate::compiler::optimizers::constant_backpropagation::ConstantBackpropagationOptimizer; + use acir::{ + brillig::MemoryAddress, + circuit::{ + brillig::{Brillig, BrilligOutputs}, + opcodes::{BlackBoxFuncCall, FunctionInput}, + Circuit, ExpressionWidth, Opcode, PublicInputs, + }, + native_types::Witness, + }; + use brillig_vm::brillig::Opcode as BrilligOpcode; + + fn test_circuit(opcodes: Vec) -> Circuit { + Circuit { + current_witness_index: 1, + expression_width: ExpressionWidth::Bounded { width: 3 }, + opcodes, + private_parameters: BTreeSet::new(), + public_parameters: PublicInputs::default(), + return_values: PublicInputs::default(), + assert_messages: Default::default(), + recursive: false, + } + } + + #[test] + fn retain_brillig_with_required_witness_outputs() { + let brillig_opcode = Opcode::Brillig(Brillig { + inputs: Vec::new(), + outputs: vec![BrilligOutputs::Simple(Witness(1))], + bytecode: vec![ + BrilligOpcode::Const { + destination: MemoryAddress(0), + bit_size: 32, + value: 1u128.into(), + }, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 1 }, + ], + predicate: None, + }); + let blackbox_opcode = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::AND { + lhs: FunctionInput { witness: Witness(1), num_bits: 64 }, + rhs: FunctionInput { witness: Witness(2), num_bits: 64 }, + output: Witness(3), + }); + + let opcodes = vec![brillig_opcode, blackbox_opcode]; + // The optimizer should keep the lowest bit size range constraint + let circuit = test_circuit(opcodes); + let acir_opcode_positions = circuit.opcodes.iter().enumerate().map(|(i, _)| i).collect(); + let optimizer = ConstantBackpropagationOptimizer::new(circuit); + + let (optimized_circuit, _) = + optimizer.backpropagate_constants_iteration(acir_opcode_positions); + + assert_eq!( + optimized_circuit.opcodes.len(), + 2, + "The brillig opcode should not be removed as the output is needed as a witness" + ); + } +} diff --git a/acvm-repo/acvm/src/compiler/optimizers/general.rs b/acvm-repo/acvm/src/compiler/optimizers/general.rs index 2bd781f7bb5..a48a590a05e 100644 --- a/acvm-repo/acvm/src/compiler/optimizers/general.rs +++ b/acvm-repo/acvm/src/compiler/optimizers/general.rs @@ -13,7 +13,8 @@ impl GeneralOptimizer { pub(crate) fn optimize(opcode: Expression) -> Expression { // XXX: Perhaps this optimization can be done on the fly let opcode = remove_zero_coefficients(opcode); - simplify_mul_terms(opcode) + let opcode = simplify_mul_terms(opcode); + simplify_linear_terms(opcode) } } @@ -42,3 +43,20 @@ fn simplify_mul_terms(mut gate: Expression) -> Expression { gate.mul_terms = hash_map.into_iter().map(|((w_l, w_r), scale)| (scale, w_l, 
w_r)).collect(); gate } + +// Simplifies all linear terms with the same variables +fn simplify_linear_terms(mut gate: Expression) -> Expression { + let mut hash_map: IndexMap = IndexMap::new(); + + // Canonicalize the ordering of the terms, lets just order by variable name + for (scale, witness) in gate.linear_combinations.into_iter() { + *hash_map.entry(witness).or_insert_with(FieldElement::zero) += scale; + } + + gate.linear_combinations = hash_map + .into_iter() + .filter(|(_, scale)| scale != &FieldElement::zero()) + .map(|(witness, scale)| (scale, witness)) + .collect(); + gate +} diff --git a/acvm-repo/acvm/src/compiler/optimizers/mod.rs b/acvm-repo/acvm/src/compiler/optimizers/mod.rs index 923756580b3..599bdabd420 100644 --- a/acvm-repo/acvm/src/compiler/optimizers/mod.rs +++ b/acvm-repo/acvm/src/compiler/optimizers/mod.rs @@ -1,5 +1,6 @@ use acir::circuit::{Circuit, Opcode}; +mod constant_backpropagation; mod general; mod redundant_range; mod unused_memory; @@ -8,6 +9,7 @@ pub(crate) use general::GeneralOptimizer; pub(crate) use redundant_range::RangeOptimizer; use tracing::info; +use self::constant_backpropagation::ConstantBackpropagationOptimizer; use self::unused_memory::UnusedMemoryOptimizer; use super::{transform_assert_messages, AcirTransformationMap}; @@ -26,6 +28,15 @@ pub fn optimize(acir: Circuit) -> (Circuit, AcirTransformationMap) { /// Applies [`ProofSystemCompiler`][crate::ProofSystemCompiler] independent optimizations to a [`Circuit`]. #[tracing::instrument(level = "trace", name = "optimize_acir" skip(acir))] pub(super) fn optimize_internal(acir: Circuit) -> (Circuit, Vec) { + // Track original acir opcode positions throughout the transformation passes of the compilation + // by applying the modifications done to the circuit opcodes and also to the opcode_positions (delete and insert) + let acir_opcode_positions = (0..acir.opcodes.len()).collect(); + + if acir.opcodes.len() == 1 && matches!(acir.opcodes[0], Opcode::Brillig(_)) { + info!("Program is fully unconstrained, skipping optimization pass"); + return (acir, acir_opcode_positions); + } + info!("Number of opcodes before: {}", acir.opcodes.len()); // General optimizer pass @@ -42,20 +53,22 @@ pub(super) fn optimize_internal(acir: Circuit) -> (Circuit, Vec) { .collect(); let acir = Circuit { opcodes, ..acir }; - // Track original acir opcode positions throughout the transformation passes of the compilation - // by applying the modifications done to the circuit opcodes and also to the opcode_positions (delete and insert) - let acir_opcode_positions = (0..acir.opcodes.len()).collect(); - // Unused memory optimization pass let memory_optimizer = UnusedMemoryOptimizer::new(acir); let (acir, acir_opcode_positions) = memory_optimizer.remove_unused_memory_initializations(acir_opcode_positions); + let (acir, acir_opcode_positions) = + ConstantBackpropagationOptimizer::backpropagate_constants(acir, acir_opcode_positions); + // Range optimization pass let range_optimizer = RangeOptimizer::new(acir); let (acir, acir_opcode_positions) = range_optimizer.replace_redundant_ranges(acir_opcode_positions); + let (acir, acir_opcode_positions) = + ConstantBackpropagationOptimizer::backpropagate_constants(acir, acir_opcode_positions); + info!("Number of opcodes after: {}", acir.opcodes.len()); (acir, acir_opcode_positions) diff --git a/acvm-repo/acvm/src/compiler/optimizers/redundant_range.rs b/acvm-repo/acvm/src/compiler/optimizers/redundant_range.rs index ecabd98b3b1..c6ca18d30ae 100644 --- 
a/acvm-repo/acvm/src/compiler/optimizers/redundant_range.rs +++ b/acvm-repo/acvm/src/compiler/optimizers/redundant_range.rs @@ -72,12 +72,9 @@ impl RangeOptimizer { } } - Opcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { input: FunctionInput { witness, num_bits }, - }) => { - Some((*witness, *num_bits)) - } + }) => Some((*witness, *num_bits)), _ => None, }) else { @@ -148,7 +145,7 @@ mod tests { use acir::{ circuit::{ opcodes::{BlackBoxFuncCall, FunctionInput}, - Circuit, Opcode, PublicInputs, + Circuit, ExpressionWidth, Opcode, PublicInputs, }, native_types::{Expression, Witness}, }; @@ -167,11 +164,13 @@ mod tests { Circuit { current_witness_index: 1, + expression_width: ExpressionWidth::Bounded { width: 3 }, opcodes, private_parameters: BTreeSet::new(), public_parameters: PublicInputs::default(), return_values: PublicInputs::default(), assert_messages: Default::default(), + recursive: false, } } diff --git a/acvm-repo/acvm/src/compiler/transformers/mod.rs b/acvm-repo/acvm/src/compiler/transformers/mod.rs index e184401c5d4..214243d9360 100644 --- a/acvm-repo/acvm/src/compiler/transformers/mod.rs +++ b/acvm-repo/acvm/src/compiler/transformers/mod.rs @@ -1,12 +1,10 @@ use acir::{ - circuit::{brillig::BrilligOutputs, directives::Directive, Circuit, Opcode}, + circuit::{brillig::BrilligOutputs, directives::Directive, Circuit, ExpressionWidth, Opcode}, native_types::{Expression, Witness}, FieldElement, }; use indexmap::IndexMap; -use crate::ExpressionWidth; - mod csat; mod r1cs; @@ -44,11 +42,11 @@ pub(super) fn transform_internal( acir_opcode_positions: Vec, ) -> (Circuit, Vec) { let mut transformer = match &expression_width { - crate::ExpressionWidth::Unbounded => { + ExpressionWidth::Unbounded => { let transformer = R1CSTransformer::new(acir); return (transformer.transform(), acir_opcode_positions); } - crate::ExpressionWidth::Bounded { width } => { + ExpressionWidth::Bounded { width } => { let mut csat = CSatTransformer::new(*width); for value in acir.circuit_arguments() { csat.mark_solvable(value); @@ -113,11 +111,6 @@ pub(super) fn transform_internal( transformer.mark_solvable(*witness); } } - Directive::PermutationSort { bits, .. } => { - for witness in bits { - transformer.mark_solvable(*witness); - } - } } new_acir_opcode_positions.push(acir_opcode_positions[index]); transformed_opcodes.push(opcode); @@ -159,6 +152,7 @@ pub(super) fn transform_internal( let acir = Circuit { current_witness_index, + expression_width, opcodes: transformed_opcodes, // The transformer does not add new public inputs ..acir diff --git a/acvm-repo/acvm/src/lib.rs b/acvm-repo/acvm/src/lib.rs index 264479d8a12..00a253fde07 100644 --- a/acvm-repo/acvm/src/lib.rs +++ b/acvm-repo/acvm/src/lib.rs @@ -7,7 +7,6 @@ pub mod compiler; pub mod pwg; pub use acvm_blackbox_solver::{BlackBoxFunctionSolver, BlackBoxResolutionError}; -use core::fmt::Debug; use pwg::OpcodeResolutionError; // re-export acir @@ -17,27 +16,3 @@ pub use acir::FieldElement; pub use brillig_vm; // re-export blackbox solver pub use acvm_blackbox_solver as blackbox_solver; - -/// Specifies the maximum width of the expressions which will be constrained. -/// -/// Unbounded Expressions are useful if you are eventually going to pass the ACIR -/// into a proving system which supports R1CS. -/// -/// Bounded Expressions are useful if you are eventually going to pass the ACIR -/// into a proving system which supports PLONK, where arithmetic expressions have a -/// finite fan-in. 
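For intuition about why the bounded case matters, here is a toy sketch of how a wide linear constraint can be split by introducing fresh intermediate witnesses so every resulting gate touches at most `width` witnesses. The helper below is invented for illustration; it is not the `CSatTransformer` algorithm used by the real transformer pass.

// Each returned gate lists only the witness indices it touches; coefficients and
// the original constant are omitted since the point is just the bounded fan-in.
fn split_to_width(witnesses: Vec<usize>, width: usize, next_witness: &mut usize) -> Vec<Vec<usize>> {
    assert!(width >= 2);
    let mut gates = Vec::new();
    let mut current = witnesses;
    // Fold the first `width - 1` witnesses into a fresh intermediate witness
    // until the remainder fits into a single gate of the requested width.
    while current.len() > width {
        let mut gate: Vec<usize> = current.drain(..width - 1).collect();
        let intermediate = *next_witness;
        *next_witness += 1;
        gate.push(intermediate);
        gates.push(gate);
        current.insert(0, intermediate);
    }
    gates.push(current);
    gates
}

fn main() {
    let mut next_witness = 10;
    // A five-witness sum under width 3 becomes three gates of width <= 3.
    let gates = split_to_width(vec![0, 1, 2, 3, 4], 3, &mut next_witness);
    assert_eq!(gates, vec![vec![0, 1, 10], vec![10, 2, 11], vec![11, 3, 4]]);
}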
-#[derive(Debug, Clone, Copy)] -pub enum ExpressionWidth { - Unbounded, - Bounded { width: usize }, -} - -impl From for ExpressionWidth { - fn from(width: usize) -> ExpressionWidth { - if width == 0 { - ExpressionWidth::Unbounded - } else { - ExpressionWidth::Bounded { width } - } - } -} diff --git a/acvm-repo/acvm/src/pwg/arithmetic.rs b/acvm-repo/acvm/src/pwg/arithmetic.rs index 81462ea495e..dc9e13d44b6 100644 --- a/acvm-repo/acvm/src/pwg/arithmetic.rs +++ b/acvm-repo/acvm/src/pwg/arithmetic.rs @@ -7,7 +7,7 @@ use super::{insert_value, ErrorLocation, OpcodeNotSolvable, OpcodeResolutionErro /// An Expression solver will take a Circuit's assert-zero opcodes with witness assignments /// and create the other witness variables -pub(super) struct ExpressionSolver; +pub(crate) struct ExpressionSolver; #[allow(clippy::enum_variant_names)] pub(super) enum OpcodeStatus { @@ -24,13 +24,18 @@ pub(crate) enum MulTerm { impl ExpressionSolver { /// Derives the rest of the witness based on the initial low level variables - pub(super) fn solve( + pub(crate) fn solve( initial_witness: &mut WitnessMap, opcode: &Expression, ) -> Result<(), OpcodeResolutionError> { let opcode = &ExpressionSolver::evaluate(opcode, initial_witness); // Evaluate multiplication term - let mul_result = ExpressionSolver::solve_mul_term(opcode, initial_witness); + let mul_result = + ExpressionSolver::solve_mul_term(opcode, initial_witness).map_err(|_| { + OpcodeResolutionError::OpcodeNotSolvable( + OpcodeNotSolvable::ExpressionHasTooManyUnknowns(opcode.clone()), + ) + })?; // Evaluate the fan-in terms let opcode_status = ExpressionSolver::solve_fan_in_term(opcode, initial_witness); @@ -54,9 +59,7 @@ impl ExpressionSolver { } } else { let assignment = -total_sum / (q + b); - // Add this into the witness assignments - insert_value(&w1, assignment, initial_witness)?; - Ok(()) + insert_value(&w1, assignment, initial_witness) } } else { // TODO: can we be more specific with this error? @@ -84,9 +87,7 @@ impl ExpressionSolver { } } else { let assignment = -(total_sum / partial_prod); - // Add this into the witness assignments - insert_value(&unknown_var, assignment, initial_witness)?; - Ok(()) + insert_value(&unknown_var, assignment, initial_witness) } } (MulTerm::Solved(a), OpcodeStatus::OpcodeSatisfied(b)) => { @@ -118,9 +119,7 @@ impl ExpressionSolver { } } else { let assignment = -(total_sum / coeff); - // Add this into the witness assignments - insert_value(&unknown_var, assignment, initial_witness)?; - Ok(()) + insert_value(&unknown_var, assignment, initial_witness) } } } @@ -130,16 +129,19 @@ impl ExpressionSolver { /// If the witness values are not known, then the function returns a None /// XXX: Do we need to account for the case where 5xy + 6x = 0 ? We do not know y, but it can be solved given x . But I believe x can be solved with another opcode /// XXX: What about making a mul opcode = a constant 5xy + 7 = 0 ? This is the same as the above. - fn solve_mul_term(arith_opcode: &Expression, witness_assignments: &WitnessMap) -> MulTerm { + fn solve_mul_term( + arith_opcode: &Expression, + witness_assignments: &WitnessMap, + ) -> Result { // First note that the mul term can only contain one/zero term // We are assuming it has been optimized. 
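As a worked illustration of the solving step here (plain f64 arithmetic standing in for `FieldElement`, and an invented helper name): with x already assigned, an assert-zero expression of the form q*x*y + l*x + m*y + c = 0 leaves y as the only unknown, so y is forced to a single value whenever its effective coefficient is non-zero.

// Solve q*x*y + l*x + m*y + c == 0 for y, given a known x.
// Returns None when y's effective coefficient is zero and this opcode alone
// cannot determine it (the real solver then reports the opcode as not yet
// solvable, or checks that the remaining constant term is zero).
fn solve_for_y(q: f64, l: f64, m: f64, c: f64, x: f64) -> Option<f64> {
    let y_coefficient = q * x + m;
    if y_coefficient == 0.0 {
        None
    } else {
        Some(-(l * x + c) / y_coefficient)
    }
}

fn main() {
    // 2*x*y + 3*x + 1 == 0 with x = 2  =>  4*y + 7 == 0  =>  y = -7/4.
    assert_eq!(solve_for_y(2.0, 3.0, 0.0, 1.0, 2.0), Some(-1.75));
}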
match arith_opcode.mul_terms.len() { - 0 => MulTerm::Solved(FieldElement::zero()), - 1 => ExpressionSolver::solve_mul_term_helper( + 0 => Ok(MulTerm::Solved(FieldElement::zero())), + 1 => Ok(ExpressionSolver::solve_mul_term_helper( &arith_opcode.mul_terms[0], witness_assignments, - ), - _ => panic!("Mul term in the assert-zero opcode must contain either zero or one term"), + )), + _ => Err(OpcodeStatus::OpcodeUnsolvable), } } @@ -209,7 +211,7 @@ impl ExpressionSolver { } // Partially evaluate the opcode using the known witnesses - pub(super) fn evaluate(expr: &Expression, initial_witness: &WitnessMap) -> Expression { + pub(crate) fn evaluate(expr: &Expression, initial_witness: &WitnessMap) -> Expression { let mut result = Expression::default(); for &(c, w1, w2) in &expr.mul_terms { let mul_result = ExpressionSolver::solve_mul_term_helper(&(c, w1, w2), initial_witness); diff --git a/acvm-repo/acvm/src/pwg/blackbox/bigint.rs b/acvm-repo/acvm/src/pwg/blackbox/bigint.rs new file mode 100644 index 00000000000..f094bb1ba20 --- /dev/null +++ b/acvm-repo/acvm/src/pwg/blackbox/bigint.rs @@ -0,0 +1,120 @@ +use std::collections::HashMap; + +use acir::{ + circuit::opcodes::FunctionInput, + native_types::{Witness, WitnessMap}, + BlackBoxFunc, FieldElement, +}; + +use num_bigint::BigUint; + +use crate::pwg::OpcodeResolutionError; + +/// Resolve BigInt opcodes by storing BigInt values (and their moduli) by their ID in a HashMap: +/// - When it encounters a bigint operation opcode, it performs the operation on the stored values +/// and store the result using the provided ID. +/// - When it gets a to_bytes opcode, it simply looks up the value and resolves the output witness accordingly. +#[derive(Default)] +pub(crate) struct BigIntSolver { + bigint_id_to_value: HashMap, + bigint_id_to_modulus: HashMap, +} + +impl BigIntSolver { + pub(crate) fn get_bigint( + &self, + id: u32, + func: BlackBoxFunc, + ) -> Result { + self.bigint_id_to_value + .get(&id) + .ok_or(OpcodeResolutionError::BlackBoxFunctionFailed( + func, + format!("could not find bigint of id {id}"), + )) + .cloned() + } + + pub(crate) fn get_modulus( + &self, + id: u32, + func: BlackBoxFunc, + ) -> Result { + self.bigint_id_to_modulus + .get(&id) + .ok_or(OpcodeResolutionError::BlackBoxFunctionFailed( + func, + format!("could not find bigint of id {id}"), + )) + .cloned() + } + pub(crate) fn bigint_from_bytes( + &mut self, + inputs: &[FunctionInput], + modulus: &[u8], + output: u32, + initial_witness: &mut WitnessMap, + ) -> Result<(), OpcodeResolutionError> { + let bytes = inputs + .iter() + .map(|input| initial_witness.get(&input.witness).unwrap().to_u128() as u8) + .collect::>(); + let bigint = BigUint::from_bytes_le(&bytes); + self.bigint_id_to_value.insert(output, bigint); + let modulus = BigUint::from_bytes_le(modulus); + self.bigint_id_to_modulus.insert(output, modulus); + Ok(()) + } + + pub(crate) fn bigint_to_bytes( + &self, + input: u32, + outputs: &[Witness], + initial_witness: &mut WitnessMap, + ) -> Result<(), OpcodeResolutionError> { + let bigint = self.get_bigint(input, BlackBoxFunc::BigIntToLeBytes)?; + + let mut bytes = bigint.to_bytes_le(); + while bytes.len() < outputs.len() { + bytes.push(0); + } + bytes.iter().zip(outputs.iter()).for_each(|(byte, output)| { + initial_witness.insert(*output, FieldElement::from(*byte as u128)); + }); + Ok(()) + } + + pub(crate) fn bigint_op( + &mut self, + lhs: u32, + rhs: u32, + output: u32, + func: BlackBoxFunc, + ) -> Result<(), OpcodeResolutionError> { + let modulus = self.get_modulus(lhs, 
func)?; + let lhs = self.get_bigint(lhs, func)?; + let rhs = self.get_bigint(rhs, func)?; + let mut result = match func { + BlackBoxFunc::BigIntAdd => lhs + rhs, + BlackBoxFunc::BigIntSub => { + if lhs >= rhs { + &lhs - &rhs + } else { + &lhs + &modulus - &rhs + } + } + BlackBoxFunc::BigIntMul => lhs * rhs, + BlackBoxFunc::BigIntDiv => { + lhs * rhs.modpow(&(&modulus - BigUint::from(1_u32)), &modulus) + } //TODO ensure that modulus is prime + _ => unreachable!("ICE - bigint_op must be called for an operation"), + }; + if result > modulus { + let q = &result / &modulus; + result -= q * &modulus; + } + self.bigint_id_to_value.insert(output, result); + self.bigint_id_to_modulus.insert(output, modulus); + Ok(()) + } +} diff --git a/acvm-repo/acvm/src/pwg/blackbox/hash.rs b/acvm-repo/acvm/src/pwg/blackbox/hash.rs index 1ada397fc59..24c835a636a 100644 --- a/acvm-repo/acvm/src/pwg/blackbox/hash.rs +++ b/acvm-repo/acvm/src/pwg/blackbox/hash.rs @@ -3,7 +3,7 @@ use acir::{ native_types::{Witness, WitnessMap}, BlackBoxFunc, FieldElement, }; -use acvm_blackbox_solver::BlackBoxResolutionError; +use acvm_blackbox_solver::{sha256compression, BlackBoxFunctionSolver, BlackBoxResolutionError}; use crate::pwg::{insert_value, witness_to_value}; use crate::OpcodeResolutionError; @@ -86,3 +86,92 @@ fn write_digest_to_outputs( Ok(()) } + +pub(crate) fn solve_sha_256_permutation_opcode( + initial_witness: &mut WitnessMap, + inputs: &[FunctionInput], + hash_values: &[FunctionInput], + outputs: &[Witness], + black_box_func: BlackBoxFunc, +) -> Result<(), OpcodeResolutionError> { + let mut message = [0; 16]; + if inputs.len() != 16 { + return Err(OpcodeResolutionError::BlackBoxFunctionFailed( + black_box_func, + format!("Expected 16 inputs but encountered {}", &message.len()), + )); + } + for (i, input) in inputs.iter().enumerate() { + let value = witness_to_value(initial_witness, input.witness)?; + message[i] = value.to_u128() as u32; + } + + if hash_values.len() != 8 { + return Err(OpcodeResolutionError::BlackBoxFunctionFailed( + black_box_func, + format!("Expected 8 values but encountered {}", hash_values.len()), + )); + } + let mut state = [0; 8]; + for (i, hash) in hash_values.iter().enumerate() { + let value = witness_to_value(initial_witness, hash.witness)?; + state[i] = value.to_u128() as u32; + } + + sha256compression(&mut state, &message); + let outputs: [Witness; 8] = outputs.try_into().map_err(|_| { + OpcodeResolutionError::BlackBoxFunctionFailed( + black_box_func, + format!("Expected 8 outputs but encountered {}", outputs.len()), + ) + })?; + for (output_witness, value) in outputs.iter().zip(state.into_iter()) { + insert_value(output_witness, FieldElement::from(value as u128), initial_witness)?; + } + + Ok(()) +} + +pub(crate) fn solve_poseidon2_permutation_opcode( + backend: &impl BlackBoxFunctionSolver, + initial_witness: &mut WitnessMap, + inputs: &[FunctionInput], + outputs: &[Witness], + len: u32, +) -> Result<(), OpcodeResolutionError> { + if len as usize != inputs.len() { + return Err(OpcodeResolutionError::BlackBoxFunctionFailed( + acir::BlackBoxFunc::Poseidon2Permutation, + format!( + "the number of inputs does not match specified length. {} != {}", + inputs.len(), + len + ), + )); + } + if len as usize != outputs.len() { + return Err(OpcodeResolutionError::BlackBoxFunctionFailed( + acir::BlackBoxFunc::Poseidon2Permutation, + format!( + "the number of outputs does not match specified length. 
{} != {}", + outputs.len(), + len + ), + )); + } + + // Read witness assignments + let mut state = Vec::new(); + for input in inputs.iter() { + let witness_assignment = witness_to_value(initial_witness, input.witness)?; + state.push(*witness_assignment); + } + + let state = backend.poseidon2_permutation(&state, len)?; + + // Write witness assignments + for (output_witness, value) in outputs.iter().zip(state.into_iter()) { + insert_value(output_witness, value, initial_witness)?; + } + Ok(()) +} diff --git a/acvm-repo/acvm/src/pwg/blackbox/mod.rs b/acvm-repo/acvm/src/pwg/blackbox/mod.rs index 0f026cd274a..6ee926043cd 100644 --- a/acvm-repo/acvm/src/pwg/blackbox/mod.rs +++ b/acvm-repo/acvm/src/pwg/blackbox/mod.rs @@ -5,11 +5,14 @@ use acir::{ }; use acvm_blackbox_solver::{blake2s, blake3, keccak256, keccakf1600, sha256}; -use self::pedersen::pedersen_hash; +use self::{ + bigint::BigIntSolver, hash::solve_poseidon2_permutation_opcode, pedersen::pedersen_hash, +}; use super::{insert_value, OpcodeNotSolvable, OpcodeResolutionError}; use crate::{pwg::witness_to_value, BlackBoxFunctionSolver}; +pub(crate) mod bigint; mod fixed_base_scalar_mul; mod hash; mod logic; @@ -19,10 +22,10 @@ mod signature; use fixed_base_scalar_mul::{embedded_curve_add, fixed_base_scalar_mul}; // Hash functions should eventually be exposed for external consumers. -use hash::solve_generic_256_hash_opcode; +use hash::{solve_generic_256_hash_opcode, solve_sha_256_permutation_opcode}; use logic::{and, xor}; use pedersen::pedersen; -use range::solve_range_opcode; +pub(crate) use range::solve_range_opcode; use signature::{ ecdsa::{secp256k1_prehashed, secp256r1_prehashed}, schnorr::schnorr_verify, @@ -53,6 +56,7 @@ pub(crate) fn solve( backend: &impl BlackBoxFunctionSolver, initial_witness: &mut WitnessMap, bb_func: &BlackBoxFuncCall, + bigint_solver: &mut BigIntSolver, ) -> Result<(), OpcodeResolutionError> { let inputs = bb_func.get_inputs_vec(); if !contains_all_inputs(initial_witness, &inputs) { @@ -190,13 +194,29 @@ pub(crate) fn solve( } // Recursive aggregation will be entirely handled by the backend and is not solved by the ACVM BlackBoxFuncCall::RecursiveAggregation { .. } => Ok(()), - BlackBoxFuncCall::BigIntAdd { .. } => todo!(), - BlackBoxFuncCall::BigIntNeg { .. } => todo!(), - BlackBoxFuncCall::BigIntMul { .. } => todo!(), - BlackBoxFuncCall::BigIntDiv { .. } => todo!(), - BlackBoxFuncCall::BigIntFromLeBytes { .. } => todo!(), - BlackBoxFuncCall::BigIntToLeBytes { .. } => todo!(), - BlackBoxFuncCall::Poseidon2Permutation { .. } => todo!(), - BlackBoxFuncCall::Sha256Compression { .. 
} => todo!(), + BlackBoxFuncCall::BigIntAdd { lhs, rhs, output } + | BlackBoxFuncCall::BigIntSub { lhs, rhs, output } + | BlackBoxFuncCall::BigIntMul { lhs, rhs, output } + | BlackBoxFuncCall::BigIntDiv { lhs, rhs, output } => { + bigint_solver.bigint_op(*lhs, *rhs, *output, bb_func.get_black_box_func()) + } + BlackBoxFuncCall::BigIntFromLeBytes { inputs, modulus, output } => { + bigint_solver.bigint_from_bytes(inputs, modulus, *output, initial_witness) + } + BlackBoxFuncCall::BigIntToLeBytes { input, outputs } => { + bigint_solver.bigint_to_bytes(*input, outputs, initial_witness) + } + BlackBoxFuncCall::Sha256Compression { inputs, hash_values, outputs } => { + solve_sha_256_permutation_opcode( + initial_witness, + inputs, + hash_values, + outputs, + bb_func.get_black_box_func(), + ) + } + BlackBoxFuncCall::Poseidon2Permutation { inputs, outputs, len } => { + solve_poseidon2_permutation_opcode(backend, initial_witness, inputs, outputs, *len) + } } } diff --git a/acvm-repo/acvm/src/pwg/blackbox/range.rs b/acvm-repo/acvm/src/pwg/blackbox/range.rs index 1b976e30ed5..2afe820b636 100644 --- a/acvm-repo/acvm/src/pwg/blackbox/range.rs +++ b/acvm-repo/acvm/src/pwg/blackbox/range.rs @@ -4,7 +4,7 @@ use crate::{ }; use acir::{circuit::opcodes::FunctionInput, native_types::WitnessMap}; -pub(super) fn solve_range_opcode( +pub(crate) fn solve_range_opcode( initial_witness: &WitnessMap, input: &FunctionInput, ) -> Result<(), OpcodeResolutionError> { diff --git a/acvm-repo/acvm/src/pwg/brillig.rs b/acvm-repo/acvm/src/pwg/brillig.rs index 0db38c776e2..51c7f4c6203 100644 --- a/acvm-repo/acvm/src/pwg/brillig.rs +++ b/acvm-repo/acvm/src/pwg/brillig.rs @@ -1,18 +1,21 @@ +use std::collections::HashMap; + use acir::{ - brillig::{ForeignCallParam, ForeignCallResult, RegisterIndex, Value}, + brillig::{ForeignCallParam, ForeignCallResult, Value}, circuit::{ brillig::{Brillig, BrilligInputs, BrilligOutputs}, + opcodes::BlockId, OpcodeLocation, }, native_types::WitnessMap, FieldElement, }; use acvm_blackbox_solver::BlackBoxFunctionSolver; -use brillig_vm::{Registers, VMStatus, VM}; +use brillig_vm::{VMStatus, VM}; use crate::{pwg::OpcodeNotSolvable, OpcodeResolutionError}; -use super::{get_value, insert_value}; +use super::{get_value, insert_value, memory_op::MemoryOpSolver}; #[derive(Debug)] pub enum BrilligSolverStatus { @@ -62,23 +65,23 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { /// Constructs a solver for a Brillig block given the bytecode and initial /// witness. - pub(super) fn new( + pub(crate) fn new( initial_witness: &WitnessMap, + memory: &HashMap, brillig: &'b Brillig, bb_solver: &'b B, acir_index: usize, ) -> Result { // Set input values - let mut input_register_values: Vec = Vec::new(); - let mut input_memory: Vec = Vec::new(); + let mut calldata: Vec = Vec::new(); // Each input represents an expression or array of expressions to evaluate. // Iterate over each input and evaluate the expression(s) associated with it. - // Push the results into registers and/or memory. + // Push the results into memory. // If a certain expression is not solvable, we stall the ACVM and do not proceed with Brillig VM execution. 
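A rough standalone sketch of the flattening described in the comment above (the `ToyInput` type and `build_calldata` are invented stand-ins for `BrilligInputs`, `Expression` and `WitnessMap`): singles contribute one calldata slot, arrays one slot per element, and any expression that cannot be evaluated yet aborts the whole attempt so the ACVM can stall instead of executing with missing inputs.

// Toy stand-in: an "expression" either evaluates to a value or is not yet solvable.
enum ToyInput {
    Single(Option<u64>),
    Array(Vec<Option<u64>>),
}

// Flatten all inputs into a single calldata vector, or return None if any
// expression cannot be evaluated yet.
fn build_calldata(inputs: &[ToyInput]) -> Option<Vec<u64>> {
    let mut calldata = Vec::new();
    for input in inputs {
        match input {
            ToyInput::Single(value) => calldata.push((*value)?),
            ToyInput::Array(values) => {
                for value in values {
                    calldata.push((*value)?);
                }
            }
        }
    }
    Some(calldata)
}

fn main() {
    let inputs = vec![ToyInput::Single(Some(7)), ToyInput::Array(vec![Some(1), Some(2)])];
    assert_eq!(build_calldata(&inputs), Some(vec![7, 1, 2]));

    let stalled = vec![ToyInput::Single(None)];
    assert_eq!(build_calldata(&stalled), None);
}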
for input in &brillig.inputs { match input { BrilligInputs::Single(expr) => match get_value(expr, initial_witness) { - Ok(value) => input_register_values.push(value.into()), + Ok(value) => calldata.push(value.into()), Err(_) => { return Err(OpcodeResolutionError::OpcodeNotSolvable( OpcodeNotSolvable::ExpressionHasTooManyUnknowns(expr.clone()), @@ -87,10 +90,9 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { }, BrilligInputs::Array(expr_arr) => { // Attempt to fetch all array input values - let memory_pointer = input_memory.len(); for expr in expr_arr.iter() { match get_value(expr, initial_witness) { - Ok(value) => input_memory.push(value.into()), + Ok(value) => calldata.push(value.into()), Err(_) => { return Err(OpcodeResolutionError::OpcodeNotSolvable( OpcodeNotSolvable::ExpressionHasTooManyUnknowns(expr.clone()), @@ -98,28 +100,28 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { } } } - - // Push value of the array pointer as a register - input_register_values.push(Value::from(memory_pointer)); + } + BrilligInputs::MemoryArray(block_id) => { + let memory_block = memory + .get(block_id) + .ok_or(OpcodeNotSolvable::MissingMemoryBlock(block_id.0))?; + for memory_index in 0..memory_block.block_len { + let memory_value = memory_block + .block_value + .get(&memory_index) + .expect("All memory is initialized on creation"); + calldata.push((*memory_value).into()); + } } } } - // Instantiate a Brillig VM given the solved input registers and memory + // Instantiate a Brillig VM given the solved calldata // along with the Brillig bytecode. - let input_registers = Registers::load(input_register_values); - let vm = VM::new(input_registers, input_memory, &brillig.bytecode, vec![], bb_solver); + let vm = VM::new(calldata, &brillig.bytecode, vec![], bb_solver); Ok(Self { vm, acir_index }) } - pub fn get_registers(&self) -> &Registers { - self.vm.get_registers() - } - - pub fn set_register(&mut self, register_index: usize, value: Value) { - self.vm.set_register(RegisterIndex(register_index), value); - } - pub fn get_memory(&self) -> &[Value] { self.vm.get_memory() } @@ -128,7 +130,11 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { self.vm.write_memory_at(ptr, value); } - pub(super) fn solve(&mut self) -> Result { + pub fn get_call_stack(&self) -> Vec { + self.vm.get_call_stack() + } + + pub(crate) fn solve(&mut self) -> Result { let status = self.vm.process_opcodes(); self.handle_vm_status(status) } @@ -151,7 +157,7 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { // Return the "resolution" to the caller who may choose to make subsequent calls // (when it gets foreign call results for example). match vm_status { - VMStatus::Finished => Ok(BrilligSolverStatus::Finished), + VMStatus::Finished { .. 
} => Ok(BrilligSolverStatus::Finished), VMStatus::InProgress => Ok(BrilligSolverStatus::InProgress), VMStatus::Failure { message, call_stack } => { Err(OpcodeResolutionError::BrilligFunctionFailed { @@ -171,7 +177,7 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { } } - pub(super) fn finalize( + pub(crate) fn finalize( self, witness: &mut WitnessMap, brillig: &Brillig, @@ -179,8 +185,8 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { // Finish the Brillig execution by writing the outputs to the witness map let vm_status = self.vm.get_status(); match vm_status { - VMStatus::Finished => { - self.write_brillig_outputs(witness, brillig)?; + VMStatus::Finished { return_data_offset, return_data_size } => { + self.write_brillig_outputs(witness, return_data_offset, return_data_size, brillig)?; Ok(()) } _ => panic!("Brillig VM has not completed execution"), @@ -190,24 +196,32 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { fn write_brillig_outputs( &self, witness_map: &mut WitnessMap, + return_data_offset: usize, + return_data_size: usize, brillig: &Brillig, ) -> Result<(), OpcodeResolutionError> { // Write VM execution results into the witness map - for (i, output) in brillig.outputs.iter().enumerate() { - let register_value = self.vm.get_registers().get(RegisterIndex::from(i)); + let memory = self.vm.get_memory(); + let mut current_ret_data_idx = return_data_offset; + for output in brillig.outputs.iter() { match output { BrilligOutputs::Simple(witness) => { - insert_value(witness, register_value.to_field(), witness_map)?; + insert_value(witness, memory[current_ret_data_idx].to_field(), witness_map)?; + current_ret_data_idx += 1; } BrilligOutputs::Array(witness_arr) => { - // Treat the register value as a pointer to memory - for (i, witness) in witness_arr.iter().enumerate() { - let value = &self.vm.get_memory()[register_value.to_usize() + i]; + for witness in witness_arr.iter() { + let value = memory[current_ret_data_idx]; insert_value(witness, value.to_field(), witness_map)?; + current_ret_data_idx += 1; } } } } + assert!( + current_ret_data_idx == return_data_offset + return_data_size, + "Brillig VM did not write the expected number of return values" + ); Ok(()) } diff --git a/acvm-repo/acvm/src/pwg/directives/mod.rs b/acvm-repo/acvm/src/pwg/directives/mod.rs index 4605168d98b..ee544521fc7 100644 --- a/acvm-repo/acvm/src/pwg/directives/mod.rs +++ b/acvm-repo/acvm/src/pwg/directives/mod.rs @@ -1,5 +1,3 @@ -use std::cmp::Ordering; - use acir::{circuit::directives::Directive, native_types::WitnessMap, FieldElement}; use num_bigint::BigUint; @@ -7,15 +5,13 @@ use crate::OpcodeResolutionError; use super::{get_value, insert_value, ErrorLocation}; -mod sorting; - /// Attempts to solve the [`Directive`] opcode `directive`. /// If successful, `initial_witness` will be mutated to contain the new witness assignment. /// /// Returns `Ok(OpcodeResolution)` to signal whether the directive was successful solved. /// /// Returns `Err(OpcodeResolutionError)` if a circuit constraint is unsatisfied. 
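With the PermutationSort arm removed below, the ToLeRadix directive solved here boils down to a little-endian radix decomposition. A minimal standalone sketch of that arithmetic (u128 instead of `FieldElement`, invented function name):

// Decompose `value` into `limb_count` little-endian digits in base `radix`,
// which is what solving a ToLeRadix directive writes into its output witnesses.
fn to_le_radix(mut value: u128, radix: u128, limb_count: usize) -> Vec<u128> {
    assert!(radix >= 2);
    let mut limbs = Vec::with_capacity(limb_count);
    for _ in 0..limb_count {
        limbs.push(value % radix);
        value /= radix;
    }
    limbs
}

fn main() {
    // 0x1234 in base 256, little-endian: lowest byte first.
    assert_eq!(to_le_radix(0x1234, 256, 4), vec![0x34, 0x12, 0, 0]);
}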
-pub(super) fn solve_directives( +pub(crate) fn solve_directives( initial_witness: &mut WitnessMap, directive: &Directive, ) -> Result<(), OpcodeResolutionError> { @@ -48,38 +44,5 @@ pub(super) fn solve_directives( Ok(()) } - Directive::PermutationSort { inputs: a, tuple, bits, sort_by } => { - let mut val_a = Vec::new(); - let mut base = Vec::new(); - for (i, element) in a.iter().enumerate() { - assert_eq!(element.len(), *tuple as usize); - let mut element_val = Vec::with_capacity(*tuple as usize + 1); - for e in element { - element_val.push(get_value(e, initial_witness)?); - } - let field_i = FieldElement::from(i as i128); - element_val.push(field_i); - base.push(field_i); - val_a.push(element_val); - } - val_a.sort_by(|a, b| { - for i in sort_by { - let int_a = BigUint::from_bytes_be(&a[*i as usize].to_be_bytes()); - let int_b = BigUint::from_bytes_be(&b[*i as usize].to_be_bytes()); - let cmp = int_a.cmp(&int_b); - if cmp != Ordering::Equal { - return cmp; - } - } - Ordering::Equal - }); - let b = val_a.iter().map(|a| *a.last().unwrap()).collect(); - let control = sorting::route(base, b); - for (w, value) in bits.iter().zip(control) { - let value = if value { FieldElement::one() } else { FieldElement::zero() }; - insert_value(w, value, initial_witness)?; - } - Ok(()) - } } } diff --git a/acvm-repo/acvm/src/pwg/directives/sorting.rs b/acvm-repo/acvm/src/pwg/directives/sorting.rs deleted file mode 100644 index 2749e88b023..00000000000 --- a/acvm-repo/acvm/src/pwg/directives/sorting.rs +++ /dev/null @@ -1,396 +0,0 @@ -use std::collections::{BTreeMap, BTreeSet}; - -use acir::FieldElement; - -// A sorting network is a graph of connected switches -// It is defined recursively so here we only keep track of the outer layer of switches -struct SortingNetwork { - n: usize, // size of the network - x_inputs: Vec, // inputs of the network - y_inputs: Vec, // outputs of the network - x_values: BTreeMap, // map for matching a y value with a x value - y_values: BTreeMap, // map for matching a x value with a y value - inner_x: Vec, // positions after the switch_x - inner_y: Vec, // positions after the sub-networks, and before the switch_y - switch_x: Vec, // outer switches for the inputs - switch_y: Vec, // outer switches for the outputs - free: BTreeSet, // outer switches available for looping -} - -impl SortingNetwork { - fn new(n: usize) -> SortingNetwork { - let free_len = (n - 1) / 2; - let mut free = BTreeSet::new(); - for i in 0..free_len { - free.insert(i); - } - SortingNetwork { - n, - x_inputs: Vec::with_capacity(n), - y_inputs: Vec::with_capacity(n), - x_values: BTreeMap::new(), - y_values: BTreeMap::new(), - inner_x: Vec::with_capacity(n), - inner_y: Vec::with_capacity(n), - switch_x: Vec::with_capacity(n / 2), - switch_y: Vec::with_capacity(free_len), - free, - } - } - - fn init(&mut self, inputs: Vec, outputs: Vec) { - let n = self.n; - assert_eq!(inputs.len(), outputs.len()); - assert_eq!(inputs.len(), n); - - self.x_inputs = inputs; - self.y_inputs = outputs; - for i in 0..self.n { - self.x_values.insert(self.x_inputs[i], i); - self.y_values.insert(self.y_inputs[i], i); - } - self.switch_x = vec![false; n / 2]; - self.switch_y = vec![false; (n - 1) / 2]; - self.inner_x = vec![FieldElement::zero(); n]; - self.inner_y = vec![FieldElement::zero(); n]; - - //Route the single wires so we do not need to handle this case later on - self.inner_y[n - 1] = self.y_inputs[n - 1]; - if n % 2 == 0 { - self.inner_y[n / 2 - 1] = self.y_inputs[n - 2]; - } else { - self.inner_x[n - 1] = self.x_inputs[n - 
1]; - } - } - - //route a wire from outputs to its value in the inputs - fn route_out_wire(&mut self, y: usize, sub: bool) -> usize { - // sub <- y - if self.is_single_y(y) { - assert!(sub); - } else { - let port = y % 2 != 0; - let s1 = sub ^ port; - let inner = self.compute_inner(y, s1); - self.configure_y(y, s1, inner); - } - // x <- sub - let x = self.x_values.remove(&self.y_inputs[y]).unwrap(); - if !self.is_single_x(x) { - let port2 = x % 2 != 0; - let s2 = sub ^ port2; - let inner = self.compute_inner(x, s2); - self.configure_x(x, s2, inner); - } - x - } - - //route a wire from inputs to its value in the outputs - fn route_in_wire(&mut self, x: usize, sub: bool) -> usize { - // x -> sub - assert!(!self.is_single_x(x)); - let port = x % 2 != 0; - let s1 = sub ^ port; - let inner = self.compute_inner(x, s1); - self.configure_x(x, s1, inner); - - // sub -> y - let y = self.y_values.remove(&self.x_inputs[x]).unwrap(); - if !self.is_single_y(y) { - let port = y % 2 != 0; - let s2 = sub ^ port; - let inner = self.compute_inner(y, s2); - self.configure_y(y, s2, inner); - } - y - } - - //update the computed switch and inner values for an input wire - fn configure_x(&mut self, x: usize, switch: bool, inner: usize) { - self.inner_x[inner] = self.x_inputs[x]; - self.switch_x[x / 2] = switch; - } - - //update the computed switch and inner values for an output wire - fn configure_y(&mut self, y: usize, switch: bool, inner: usize) { - self.inner_y[inner] = self.y_inputs[y]; - self.switch_y[y / 2] = switch; - } - - // returns the other wire belonging to the same switch - fn sibling(index: usize) -> usize { - index + 1 - 2 * (index % 2) - } - - // returns a free switch - fn take(&mut self) -> Option { - self.free.first().copied() - } - - fn is_single_x(&self, a: usize) -> bool { - let n = self.x_inputs.len(); - n % 2 == 1 && a == n - 1 - } - - fn is_single_y(&mut self, a: usize) -> bool { - let n = self.x_inputs.len(); - a >= n - 2 + n % 2 - } - - // compute the inner position of idx through its switch - fn compute_inner(&self, idx: usize, switch: bool) -> usize { - if switch ^ (idx % 2 == 1) { - idx / 2 + self.n / 2 - } else { - idx / 2 - } - } - - fn new_start(&mut self) -> (Option, usize) { - let next = self.take(); - if let Some(switch) = next { - (next, 2 * switch) - } else { - (None, 0) - } - } -} - -// Computes the control bits of the sorting network which transform inputs into outputs -// implementation is based on https://www.mdpi.com/2227-7080/10/1/16 -pub(super) fn route(inputs: Vec, outputs: Vec) -> Vec { - assert_eq!(inputs.len(), outputs.len()); - match inputs.len() { - 0 => Vec::new(), - 1 => { - assert_eq!(inputs[0], outputs[0]); - Vec::new() - } - 2 => { - if inputs[0] == outputs[0] { - assert_eq!(inputs[1], outputs[1]); - vec![false] - } else { - assert_eq!(inputs[1], outputs[0]); - assert_eq!(inputs[0], outputs[1]); - vec![true] - } - } - _ => { - let n = inputs.len(); - - let mut result; - let n1 = n / 2; - let in_sub1; - let out_sub1; - let in_sub2; - let out_sub2; - - // process the outer layer in a code block so that the intermediate data is cleared before recursion - { - let mut network = SortingNetwork::new(n); - network.init(inputs, outputs); - - //We start with the last single wire - let mut out_idx = n - 1; - let mut start_sub = true; //it is connected to the lower inner network - let mut switch = None; - let mut start = None; - - while !network.free.is_empty() { - // the processed switch is no more available - if let Some(free_switch) = switch { - 
network.free.remove(&free_switch); - } - - // connect the output wire to its matching input - let in_idx = network.route_out_wire(out_idx, start_sub); - if network.is_single_x(in_idx) { - start_sub = !start_sub; //We need to restart, but did not complete the loop so we switch the sub network - (start, out_idx) = network.new_start(); - switch = start; - continue; - } - - // loop from the sibling - let next = SortingNetwork::sibling(in_idx); - // connect the input wire to its matching output, using the other sub-network - out_idx = network.route_in_wire(next, !start_sub); - switch = Some(out_idx / 2); - if start == switch || network.is_single_y(out_idx) { - //loop is complete, need a fresh start - (start, out_idx) = network.new_start(); - switch = start; - } else { - // we loop back from the sibling - out_idx = SortingNetwork::sibling(out_idx); - } - } - //All the wires are connected, we can now route the sub-networks - result = network.switch_x; - result.extend(network.switch_y); - in_sub1 = network.inner_x[0..n1].to_vec(); - in_sub2 = network.inner_x[n1..].to_vec(); - out_sub1 = network.inner_y[0..n1].to_vec(); - out_sub2 = network.inner_y[n1..].to_vec(); - } - let s1 = route(in_sub1, out_sub1); - result.extend(s1); - let s2 = route(in_sub2, out_sub2); - result.extend(s2); - result - } - } -} - -#[cfg(test)] -mod tests { - // Silence `unused_crate_dependencies` warning - use paste as _; - use proptest as _; - - use super::route; - use acir::FieldElement; - use rand::prelude::*; - - fn execute_network(config: Vec, inputs: Vec) -> Vec { - let n = inputs.len(); - if n == 1 { - return inputs; - } - let mut in1 = Vec::new(); - let mut in2 = Vec::new(); - //layer 1: - for i in 0..n / 2 { - if config[i] { - in1.push(inputs[2 * i + 1]); - in2.push(inputs[2 * i]); - } else { - in1.push(inputs[2 * i]); - in2.push(inputs[2 * i + 1]); - } - } - if n % 2 == 1 { - in2.push(*inputs.last().unwrap()); - } - let n2 = n / 2 + (n - 1) / 2; - let n3 = n2 + switch_nb(n / 2); - let mut result = Vec::new(); - let out1 = execute_network(config[n2..n3].to_vec(), in1); - let out2 = execute_network(config[n3..].to_vec(), in2); - //last layer: - for i in 0..(n - 1) / 2 { - if config[n / 2 + i] { - result.push(out2[i]); - result.push(out1[i]); - } else { - result.push(out1[i]); - result.push(out2[i]); - } - } - if n % 2 == 0 { - result.push(*out1.last().unwrap()); - result.push(*out2.last().unwrap()); - } else { - result.push(*out2.last().unwrap()); - } - result - } - - // returns the number of switches in the network - fn switch_nb(n: usize) -> usize { - let mut s = 0; - for i in 0..n { - s += f64::from((i + 1) as u32).log2().ceil() as usize; - } - s - } - - #[test] - fn test_route() { - //basic tests - let a = vec![ - FieldElement::from(1_i128), - FieldElement::from(2_i128), - FieldElement::from(3_i128), - ]; - let b = vec![ - FieldElement::from(1_i128), - FieldElement::from(2_i128), - FieldElement::from(3_i128), - ]; - let c = route(a, b); - assert_eq!(c, vec![false, false, false]); - - let a = vec![ - FieldElement::from(1_i128), - FieldElement::from(2_i128), - FieldElement::from(3_i128), - ]; - let b = vec![ - FieldElement::from(1_i128), - FieldElement::from(3_i128), - FieldElement::from(2_i128), - ]; - let c = route(a, b); - assert_eq!(c, vec![false, false, true]); - - let a = vec![ - FieldElement::from(1_i128), - FieldElement::from(2_i128), - FieldElement::from(3_i128), - ]; - let b = vec![ - FieldElement::from(3_i128), - FieldElement::from(2_i128), - FieldElement::from(1_i128), - ]; - let c = route(a, b); - 
assert_eq!(c, vec![true, true, true]); - - let a = vec![ - FieldElement::from(0_i128), - FieldElement::from(1_i128), - FieldElement::from(2_i128), - FieldElement::from(3_i128), - ]; - let b = vec![ - FieldElement::from(2_i128), - FieldElement::from(3_i128), - FieldElement::from(0_i128), - FieldElement::from(1_i128), - ]; - let c = route(a, b); - assert_eq!(c, vec![false, true, true, true, true]); - - let a = vec![ - FieldElement::from(0_i128), - FieldElement::from(1_i128), - FieldElement::from(2_i128), - FieldElement::from(3_i128), - FieldElement::from(4_i128), - ]; - let b = vec![ - FieldElement::from(0_i128), - FieldElement::from(3_i128), - FieldElement::from(4_i128), - FieldElement::from(2_i128), - FieldElement::from(1_i128), - ]; - let c = route(a, b); - assert_eq!(c, vec![false, false, false, true, false, true, false, true]); - - // random tests - for i in 2..50 { - let mut a = vec![FieldElement::zero()]; - for j in 0..i - 1 { - a.push(a[j] + FieldElement::one()); - } - let mut rng = rand::thread_rng(); - let mut b = a.clone(); - b.shuffle(&mut rng); - let c = route(a.clone(), b.clone()); - assert_eq!(b, execute_network(c, a)); - } - } -} diff --git a/acvm-repo/acvm/src/pwg/memory_op.rs b/acvm-repo/acvm/src/pwg/memory_op.rs index c1da2cd95cf..e51797707a7 100644 --- a/acvm-repo/acvm/src/pwg/memory_op.rs +++ b/acvm-repo/acvm/src/pwg/memory_op.rs @@ -13,9 +13,9 @@ type MemoryIndex = u32; /// Maintains the state for solving [`MemoryInit`][`acir::circuit::Opcode::MemoryInit`] and [`MemoryOp`][`acir::circuit::Opcode::MemoryOp`] opcodes. #[derive(Default)] -pub(super) struct MemoryOpSolver { - block_value: HashMap, - block_len: u32, +pub(crate) struct MemoryOpSolver { + pub(super) block_value: HashMap, + pub(super) block_len: u32, } impl MemoryOpSolver { diff --git a/acvm-repo/acvm/src/pwg/mod.rs b/acvm-repo/acvm/src/pwg/mod.rs index 41b96572658..d8323e5ef5f 100644 --- a/acvm-repo/acvm/src/pwg/mod.rs +++ b/acvm-repo/acvm/src/pwg/mod.rs @@ -10,7 +10,10 @@ use acir::{ }; use acvm_blackbox_solver::BlackBoxResolutionError; -use self::{arithmetic::ExpressionSolver, directives::solve_directives, memory_op::MemoryOpSolver}; +use self::{ + arithmetic::ExpressionSolver, blackbox::bigint::BigIntSolver, directives::solve_directives, + memory_op::MemoryOpSolver, +}; use crate::BlackBoxFunctionSolver; use thiserror::Error; @@ -18,11 +21,11 @@ use thiserror::Error; // arithmetic pub(crate) mod arithmetic; // Brillig bytecode -mod brillig; +pub(crate) mod brillig; // Directives -mod directives; +pub(crate) mod directives; // black box functions -mod blackbox; +pub(crate) mod blackbox; mod memory_op; pub use self::brillig::{BrilligSolver, BrilligSolverStatus}; @@ -76,6 +79,8 @@ pub enum StepResult<'a, B: BlackBoxFunctionSolver> { pub enum OpcodeNotSolvable { #[error("missing assignment for witness index {0}")] MissingAssignment(u32), + #[error("Attempted to load uninitialized memory block")] + MissingMemoryBlock(u32), #[error("expression has too many unknowns {0}")] ExpressionHasTooManyUnknowns(Expression), } @@ -132,6 +137,8 @@ pub struct ACVM<'a, B: BlackBoxFunctionSolver> { /// Stores the solver for memory operations acting on blocks of memory disambiguated by [block][`BlockId`]. block_solvers: HashMap, + bigint_solver: BigIntSolver, + /// A list of opcodes which are to be executed by the ACVM. opcodes: &'a [Opcode], /// Index of the next opcode to be executed. 
@@ -149,6 +156,7 @@ impl<'a, B: BlackBoxFunctionSolver> ACVM<'a, B> { status, backend, block_solvers: HashMap::default(), + bigint_solver: BigIntSolver::default(), opcodes, instruction_pointer: 0, witness_map: initial_witness, @@ -254,9 +262,12 @@ impl<'a, B: BlackBoxFunctionSolver> ACVM<'a, B> { let resolution = match opcode { Opcode::AssertZero(expr) => ExpressionSolver::solve(&mut self.witness_map, expr), - Opcode::BlackBoxFuncCall(bb_func) => { - blackbox::solve(self.backend, &mut self.witness_map, bb_func) - } + Opcode::BlackBoxFuncCall(bb_func) => blackbox::solve( + self.backend, + &mut self.witness_map, + bb_func, + &mut self.bigint_solver, + ), Opcode::Directive(directive) => solve_directives(&mut self.witness_map, directive), Opcode::MemoryInit { block_id, init } => { let solver = self.block_solvers.entry(*block_id).or_default(); @@ -327,7 +338,13 @@ impl<'a, B: BlackBoxFunctionSolver> ACVM<'a, B> { // there will be a cached `BrilligSolver` to avoid recomputation. let mut solver: BrilligSolver<'_, B> = match self.brillig_solver.take() { Some(solver) => solver, - None => BrilligSolver::new(witness, brillig, self.backend, self.instruction_pointer)?, + None => BrilligSolver::new( + witness, + &self.block_solvers, + brillig, + self.backend, + self.instruction_pointer, + )?, }; match solver.solve()? { BrilligSolverStatus::ForeignCallWait(foreign_call) => { @@ -362,7 +379,13 @@ impl<'a, B: BlackBoxFunctionSolver> ACVM<'a, B> { return StepResult::Status(self.handle_opcode_resolution(resolution)); } - let solver = BrilligSolver::new(witness, brillig, self.backend, self.instruction_pointer); + let solver = BrilligSolver::new( + witness, + &self.block_solvers, + brillig, + self.backend, + self.instruction_pointer, + ); match solver { Ok(solver) => StepResult::IntoBrillig(solver), Err(..) => StepResult::Status(self.handle_opcode_resolution(solver.map(|_| ()))), diff --git a/acvm-repo/acvm/tests/solver.rs b/acvm-repo/acvm/tests/solver.rs index 486e04d5bf1..b267c3005a8 100644 --- a/acvm-repo/acvm/tests/solver.rs +++ b/acvm-repo/acvm/tests/solver.rs @@ -1,7 +1,7 @@ use std::collections::BTreeMap; use acir::{ - brillig::{BinaryFieldOp, Opcode as BrilligOpcode, RegisterIndex, RegisterOrMemory, Value}, + brillig::{BinaryFieldOp, MemoryAddress, Opcode as BrilligOpcode, Value, ValueOrArray}, circuit::{ brillig::{Brillig, BrilligInputs, BrilligOutputs}, opcodes::{BlockId, MemOp}, @@ -13,6 +13,7 @@ use acir::{ use acvm::pwg::{ACVMStatus, ErrorLocation, ForeignCallWaitInfo, OpcodeResolutionError, ACVM}; use acvm_blackbox_solver::StubbedBlackBoxSolver; +use brillig_vm::brillig::HeapValueType; // Reenable these test cases once we move the brillig implementation of inversion down into the acvm stdlib. 
@@ -37,9 +38,9 @@ fn inversion_brillig_oracle_equivalence() { let equal_opcode = BrilligOpcode::BinaryFieldOp { op: BinaryFieldOp::Equals, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(2), + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(2), }; let brillig_data = Brillig { @@ -52,20 +53,28 @@ fn inversion_brillig_oracle_equivalence() { }), BrilligInputs::Single(Expression::default()), // Input Register 1 ], - // This tells the BrilligSolver which witnesses its output registers correspond to + // This tells the BrilligSolver which witnesses its output values correspond to outputs: vec![ BrilligOutputs::Simple(w_x_plus_y), // Output Register 0 - from input BrilligOutputs::Simple(w_oracle), // Output Register 1 BrilligOutputs::Simple(w_equal_res), // Output Register 2 ], bytecode: vec![ + BrilligOpcode::CalldataCopy { + destination_address: MemoryAddress(0), + size: 2, + offset: 0, + }, equal_opcode, // Oracles are named 'foreign calls' in brillig BrilligOpcode::ForeignCall { function: "invert".into(), - destinations: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(1))], - inputs: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(0))], + destinations: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(1))], + destination_value_types: vec![HeapValueType::Simple], + inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(0))], + input_value_types: vec![HeapValueType::Simple], }, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 3 }, ], predicate: None, }; @@ -151,9 +160,9 @@ fn double_inversion_brillig_oracle() { let equal_opcode = BrilligOpcode::BinaryFieldOp { op: BinaryFieldOp::Equals, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(4), + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(4), }; let brillig_data = Brillig { @@ -180,18 +189,28 @@ fn double_inversion_brillig_oracle() { BrilligOutputs::Simple(w_equal_res), // Output Register 4 ], bytecode: vec![ + BrilligOpcode::CalldataCopy { + destination_address: MemoryAddress(0), + size: 3, + offset: 0, + }, equal_opcode, // Oracles are named 'foreign calls' in brillig BrilligOpcode::ForeignCall { function: "invert".into(), - destinations: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(1))], - inputs: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(0))], + destinations: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(1))], + destination_value_types: vec![HeapValueType::Simple], + inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(0))], + input_value_types: vec![HeapValueType::Simple], }, BrilligOpcode::ForeignCall { function: "invert".into(), - destinations: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(3))], - inputs: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(2))], + destinations: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(3))], + destination_value_types: vec![HeapValueType::Simple], + inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(2))], + input_value_types: vec![HeapValueType::Simple], }, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 5 }, ], predicate: None, }; @@ -306,17 +325,27 @@ fn oracle_dependent_execution() { BrilligOutputs::Simple(w_y_inv), // Output Register 3 ], bytecode: vec![ + BrilligOpcode::CalldataCopy { + destination_address: MemoryAddress(0), + size: 3, + offset: 0, + }, // Oracles are named 
'foreign calls' in brillig BrilligOpcode::ForeignCall { function: "invert".into(), - destinations: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(1))], - inputs: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(0))], + destinations: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(1))], + destination_value_types: vec![HeapValueType::Simple], + inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(0))], + input_value_types: vec![HeapValueType::Simple], }, BrilligOpcode::ForeignCall { function: "invert".into(), - destinations: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(3))], - inputs: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(2))], + destinations: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(3))], + destination_value_types: vec![HeapValueType::Simple], + inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(2))], + input_value_types: vec![HeapValueType::Simple], }, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 4 }, ], predicate: None, }; @@ -404,9 +433,9 @@ fn brillig_oracle_predicate() { let equal_opcode = BrilligOpcode::BinaryFieldOp { op: BinaryFieldOp::Equals, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(2), + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(2), }; let brillig_opcode = Opcode::Brillig(Brillig { @@ -425,12 +454,19 @@ fn brillig_oracle_predicate() { BrilligOutputs::Simple(w_lt_res), ], bytecode: vec![ + BrilligOpcode::CalldataCopy { + destination_address: MemoryAddress(0), + size: 2, + offset: 0, + }, equal_opcode, // Oracles are named 'foreign calls' in brillig BrilligOpcode::ForeignCall { function: "invert".into(), - destinations: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(1))], - inputs: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(0))], + destinations: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(1))], + destination_value_types: vec![HeapValueType::Simple], + inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(0))], + input_value_types: vec![HeapValueType::Simple], }, ], predicate: Some(Expression::default()), @@ -502,21 +538,24 @@ fn unsatisfied_opcode_resolved_brillig() { let w_y = Witness(5); let w_result = Witness(6); + let calldata_copy_opcode = + BrilligOpcode::CalldataCopy { destination_address: MemoryAddress(0), size: 2, offset: 0 }; + let equal_opcode = BrilligOpcode::BinaryFieldOp { op: BinaryFieldOp::Equals, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(2), + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(2), }; // Jump pass the trap if the values are equal, else // jump to the trap let location_of_stop = 3; let jmp_if_opcode = - BrilligOpcode::JumpIf { condition: RegisterIndex::from(2), location: location_of_stop }; + BrilligOpcode::JumpIf { condition: MemoryAddress::from(2), location: location_of_stop }; let trap_opcode = BrilligOpcode::Trap; - let stop_opcode = BrilligOpcode::Stop; + let stop_opcode = BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 }; let brillig_opcode = Opcode::Brillig(Brillig { inputs: vec![ @@ -532,7 +571,7 @@ fn unsatisfied_opcode_resolved_brillig() { }), ], outputs: vec![BrilligOutputs::Simple(w_result)], - bytecode: vec![equal_opcode, jmp_if_opcode, trap_opcode, stop_opcode], + bytecode: vec![calldata_copy_opcode, equal_opcode, jmp_if_opcode, trap_opcode, stop_opcode], predicate: 
Some(Expression::one()), }); @@ -564,7 +603,7 @@ fn unsatisfied_opcode_resolved_brillig() { solver_status, ACVMStatus::Failure(OpcodeResolutionError::BrilligFunctionFailed { message: "explicit trap hit in brillig".to_string(), - call_stack: vec![OpcodeLocation::Brillig { acir_index: 0, brillig_index: 2 }] + call_stack: vec![OpcodeLocation::Brillig { acir_index: 0, brillig_index: 3 }] }), "The first opcode is not satisfiable, expected an error indicating this" ); diff --git a/acvm-repo/acvm_js/Cargo.toml b/acvm-repo/acvm_js/Cargo.toml index 226e273c306..7ec814a72e5 100644 --- a/acvm-repo/acvm_js/Cargo.toml +++ b/acvm-repo/acvm_js/Cargo.toml @@ -2,7 +2,7 @@ name = "acvm_js" description = "Typescript wrapper around the ACVM allowing execution of ACIR code" # x-release-please-start-version -version = "0.39.0" +version = "0.40.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/acvm_js/package.json b/acvm-repo/acvm_js/package.json index 4ec9b1a2da3..876db9ccb62 100644 --- a/acvm-repo/acvm_js/package.json +++ b/acvm-repo/acvm_js/package.json @@ -1,6 +1,6 @@ { "name": "@noir-lang/acvm_js", - "version": "0.39.0", + "version": "0.40.0", "publishConfig": { "access": "public" }, @@ -44,7 +44,7 @@ "@web/test-runner": "^0.15.3", "@web/test-runner-playwright": "^0.10.0", "chai": "^4.3.7", - "eslint": "^8.50.0", + "eslint": "^8.56.0", "eslint-plugin-prettier": "^5.0.0", "mocha": "^10.2.0", "prettier": "3.0.3", diff --git a/acvm-repo/acvm_js/test/shared/addition.ts b/acvm-repo/acvm_js/test/shared/addition.ts index 982b9b685ce..217902bdea6 100644 --- a/acvm-repo/acvm_js/test/shared/addition.ts +++ b/acvm-repo/acvm_js/test/shared/addition.ts @@ -2,11 +2,11 @@ import { WitnessMap } from '@noir-lang/acvm_js'; // See `addition_circuit` integration test in `acir/tests/test_program_serialization.rs`. export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 187, 13, 192, 32, 12, 68, 249, 100, 32, 27, 219, 96, 119, 89, 37, 40, - 176, 255, 8, 17, 18, 5, 74, 202, 240, 154, 235, 158, 238, 238, 112, 206, 121, 247, 37, 206, 60, 103, 194, 63, 208, - 111, 116, 133, 197, 69, 144, 153, 91, 73, 13, 9, 47, 72, 86, 85, 128, 165, 102, 69, 69, 81, 185, 147, 18, 53, 101, 45, - 86, 173, 128, 33, 83, 195, 46, 70, 125, 202, 226, 190, 94, 16, 166, 103, 108, 13, 203, 151, 254, 245, 233, 224, 1, 1, - 52, 166, 127, 120, 1, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 208, 49, 14, 192, 32, 8, 5, 80, 212, 30, 8, 4, 20, 182, 94, 165, 166, 122, + 255, 35, 52, 77, 28, 76, 58, 214, 191, 124, 166, 23, 242, 15, 0, 8, 240, 77, 154, 125, 206, 198, 127, 161, 176, 209, + 138, 139, 197, 88, 68, 122, 205, 157, 152, 46, 204, 222, 76, 81, 180, 21, 35, 35, 53, 189, 179, 49, 119, 19, 171, 222, + 188, 162, 147, 112, 167, 161, 206, 99, 98, 105, 223, 95, 248, 26, 113, 90, 97, 185, 97, 217, 56, 173, 35, 63, 243, 81, + 87, 163, 125, 1, 0, 0, ]); export const initialWitnessMap: WitnessMap = new Map([ diff --git a/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts b/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts index 1b6f5e4319a..27abd72305f 100644 --- a/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts +++ b/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts @@ -2,11 +2,13 @@ import { WitnessMap } from '@noir-lang/acvm_js'; // See `complex_brillig_foreign_call` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 83, 219, 10, 128, 48, 8, 117, 174, 139, 159, 179, 254, 160, 127, 137, 222, - 138, 122, 236, 243, 19, 114, 32, 22, 244, 144, 131, 118, 64, 156, 178, 29, 14, 59, 74, 0, 16, 224, 66, 228, 64, 57, 7, - 169, 53, 242, 189, 81, 114, 250, 134, 33, 248, 113, 165, 82, 26, 177, 2, 141, 177, 128, 198, 60, 15, 63, 245, 219, - 211, 23, 215, 255, 139, 15, 251, 211, 112, 180, 28, 157, 212, 189, 100, 82, 179, 64, 170, 63, 109, 235, 190, 204, 135, - 166, 178, 150, 216, 62, 154, 252, 250, 70, 147, 35, 220, 119, 93, 227, 4, 182, 131, 81, 25, 36, 4, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 84, 75, 10, 132, 48, 12, 125, 177, 163, 35, 179, 154, 35, 8, 51, 7, 232, 204, + 9, 188, 139, 184, 83, 116, 233, 241, 173, 152, 98, 12, 213, 141, 21, 244, 65, 232, 39, 175, 233, 35, 73, 155, 3, 32, + 204, 48, 206, 18, 158, 19, 175, 37, 60, 175, 228, 209, 30, 195, 143, 226, 197, 178, 103, 105, 76, 110, 160, 209, 156, + 160, 209, 247, 195, 69, 235, 29, 179, 46, 81, 243, 103, 2, 239, 231, 225, 44, 117, 150, 241, 250, 201, 99, 206, 251, + 96, 95, 161, 242, 14, 193, 243, 40, 162, 105, 253, 219, 12, 75, 47, 146, 186, 251, 37, 116, 86, 93, 219, 55, 245, 96, + 20, 85, 75, 253, 136, 249, 87, 249, 105, 231, 220, 4, 249, 237, 132, 56, 20, 224, 109, 113, 223, 88, 82, 153, 34, 64, + 34, 14, 164, 69, 172, 48, 2, 23, 243, 6, 31, 25, 5, 0, 0, ]); export const initialWitnessMap: WitnessMap = new Map([ [1, '0x0000000000000000000000000000000000000000000000000000000000000001'], diff --git a/acvm-repo/acvm_js/test/shared/fixed_base_scalar_mul.ts b/acvm-repo/acvm_js/test/shared/fixed_base_scalar_mul.ts index 0437bebc369..c0859f50135 100644 --- a/acvm-repo/acvm_js/test/shared/fixed_base_scalar_mul.ts +++ b/acvm-repo/acvm_js/test/shared/fixed_base_scalar_mul.ts @@ -1,8 +1,8 @@ // See `fixed_base_scalar_mul_circuit` integration test in `acir/tests/test_program_serialization.rs`. export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 138, 91, 10, 0, 48, 12, 194, 178, 215, 215, 46, 189, 163, 175, 165, 10, 21, 36, - 10, 57, 192, 160, 146, 188, 226, 139, 78, 113, 69, 183, 190, 61, 111, 218, 182, 231, 124, 122, 8, 177, 65, 92, 0, 0, - 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 138, 91, 10, 0, 32, 16, 2, 109, 171, 175, 46, 221, 209, 247, 229, 130, 130, + 140, 200, 92, 0, 11, 157, 228, 35, 127, 212, 200, 29, 61, 116, 76, 220, 217, 250, 171, 91, 113, 160, 66, 104, 242, 97, + 0, 0, 0, ]); export const initialWitnessMap = new Map([ [1, '0x0000000000000000000000000000000000000000000000000000000000000001'], diff --git a/acvm-repo/acvm_js/test/shared/foreign_call.ts b/acvm-repo/acvm_js/test/shared/foreign_call.ts index 178ec3a09d1..0be8937b57d 100644 --- a/acvm-repo/acvm_js/test/shared/foreign_call.ts +++ b/acvm-repo/acvm_js/test/shared/foreign_call.ts @@ -2,10 +2,10 @@ import { WitnessMap } from '@noir-lang/acvm_js'; // See `simple_brillig_foreign_call` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 143, 49, 10, 64, 33, 12, 67, 99, 63, 124, 60, 142, 222, 192, 203, 56, 184, 56, - 136, 120, 126, 5, 21, 226, 160, 139, 62, 40, 13, 45, 132, 68, 3, 80, 232, 124, 164, 153, 121, 115, 99, 155, 59, 172, - 122, 231, 101, 56, 175, 80, 86, 221, 230, 31, 58, 196, 226, 83, 62, 53, 91, 16, 122, 10, 246, 84, 99, 243, 0, 30, 59, - 1, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 143, 177, 10, 192, 32, 16, 67, 227, 21, 74, 233, 212, 79, 177, 127, 208, 159, + 233, 224, 226, 32, 226, 247, 139, 168, 16, 68, 93, 244, 45, 119, 228, 142, 144, 92, 0, 20, 50, 7, 237, 76, 213, 190, + 50, 245, 26, 175, 218, 231, 165, 57, 175, 148, 14, 137, 179, 147, 191, 114, 211, 221, 216, 240, 59, 63, 107, 221, 115, + 104, 181, 103, 244, 43, 36, 10, 38, 68, 108, 25, 253, 238, 136, 1, 0, 0, ]); export const initialWitnessMap: WitnessMap = new Map([ [1, '0x0000000000000000000000000000000000000000000000000000000000000005'], diff --git a/acvm-repo/acvm_js/test/shared/memory_op.ts b/acvm-repo/acvm_js/test/shared/memory_op.ts index ce88f491893..b5ab64b3447 100644 --- a/acvm-repo/acvm_js/test/shared/memory_op.ts +++ b/acvm-repo/acvm_js/test/shared/memory_op.ts @@ -1,9 +1,9 @@ // See `memory_op_circuit` integration test in `acir/tests/test_program_serialization.rs`. export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 146, 49, 14, 0, 32, 8, 3, 139, 192, 127, 240, 7, 254, 255, 85, 198, 136, 9, - 131, 155, 48, 216, 165, 76, 77, 57, 80, 0, 140, 45, 117, 111, 238, 228, 179, 224, 174, 225, 110, 111, 234, 213, 185, - 148, 156, 203, 121, 89, 86, 13, 215, 126, 131, 43, 153, 187, 115, 40, 185, 62, 153, 3, 136, 83, 60, 30, 96, 2, 12, - 235, 225, 124, 14, 3, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 145, 187, 17, 0, 32, 8, 67, 195, 111, 31, 220, 192, 253, 167, 178, 144, 2, + 239, 236, 132, 194, 52, 129, 230, 93, 8, 6, 64, 176, 101, 225, 28, 78, 49, 43, 238, 154, 225, 254, 166, 209, 205, 165, + 98, 174, 212, 177, 188, 187, 92, 255, 173, 92, 173, 190, 93, 82, 80, 78, 123, 14, 127, 60, 97, 1, 210, 144, 46, 242, + 19, 3, 0, 0, ]); export const initialWitnessMap = new Map([ diff --git a/acvm-repo/acvm_js/test/shared/pedersen.ts b/acvm-repo/acvm_js/test/shared/pedersen.ts index e35893fc355..5150d24131c 100644 --- a/acvm-repo/acvm_js/test/shared/pedersen.ts +++ b/acvm-repo/acvm_js/test/shared/pedersen.ts @@ -1,7 +1,7 @@ // See `pedersen_circuit` integration test in `acir/tests/test_program_serialization.rs`. export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 138, 9, 10, 0, 64, 8, 2, 103, 15, 232, 255, 31, 142, 138, 10, 34, 65, 84, 198, - 15, 28, 82, 145, 178, 182, 86, 191, 238, 183, 24, 131, 205, 79, 203, 0, 166, 242, 158, 93, 92, 0, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 74, 135, 9, 0, 48, 8, 75, 171, 224, 255, 15, 139, 27, 196, 64, 200, 100, 0, 15, + 133, 80, 57, 89, 219, 127, 39, 173, 126, 235, 236, 247, 151, 48, 224, 71, 90, 33, 97, 0, 0, 0, ]); export const initialWitnessMap = new Map([[1, '0x0000000000000000000000000000000000000000000000000000000000000001']]); diff --git a/acvm-repo/acvm_js/test/shared/schnorr_verify.ts b/acvm-repo/acvm_js/test/shared/schnorr_verify.ts index 5716cbd30f8..2127de66f69 100644 --- a/acvm-repo/acvm_js/test/shared/schnorr_verify.ts +++ b/acvm-repo/acvm_js/test/shared/schnorr_verify.ts @@ -1,17 +1,17 @@ // See `schnorr_verify_circuit` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 210, 87, 78, 2, 1, 20, 134, 209, 177, 247, 222, 123, 71, 68, 68, 68, 68, 68, - 68, 68, 68, 68, 221, 133, 251, 95, 130, 145, 27, 206, 36, 78, 50, 57, 16, 94, 200, 253, 191, 159, 36, 73, 134, 146, - 193, 19, 142, 243, 183, 255, 14, 179, 233, 247, 145, 254, 59, 217, 127, 71, 57, 198, 113, 78, 48, 125, 167, 56, 205, - 25, 206, 114, 142, 243, 92, 224, 34, 151, 184, 204, 21, 174, 114, 141, 235, 220, 224, 38, 183, 184, 205, 29, 238, 114, - 143, 251, 60, 224, 33, 143, 120, 204, 19, 158, 242, 140, 25, 158, 51, 203, 11, 230, 120, 201, 60, 175, 88, 224, 53, - 139, 188, 97, 137, 183, 44, 243, 142, 21, 222, 179, 202, 7, 214, 248, 200, 58, 159, 216, 224, 51, 155, 124, 97, 235, - 223, 142, 241, 188, 250, 222, 230, 27, 59, 124, 103, 151, 31, 236, 241, 147, 95, 252, 246, 57, 158, 104, 47, 186, 139, - 214, 162, 179, 104, 44, 250, 74, 219, 154, 242, 63, 162, 165, 232, 40, 26, 138, 126, 162, 157, 232, 38, 154, 137, 94, - 162, 149, 232, 36, 26, 137, 62, 162, 141, 232, 34, 154, 136, 30, 162, 133, 232, 32, 26, 136, 253, 99, 251, 195, 100, - 176, 121, 236, 29, 91, 159, 218, 56, 99, 219, 172, 77, 115, 182, 204, 219, 176, 96, 187, 162, 205, 74, 182, 42, 219, - 168, 98, 155, 170, 77, 106, 182, 168, 219, 160, 225, 246, 77, 55, 111, 185, 113, 219, 109, 59, 110, 218, 117, 203, - 158, 27, 166, 55, 75, 239, 150, 184, 101, 250, 252, 1, 55, 204, 92, 74, 220, 3, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 210, 7, 74, 3, 1, 20, 69, 209, 177, 247, 222, 123, 239, 189, 119, 141, 93, 99, + 220, 133, 251, 95, 130, 152, 103, 78, 32, 3, 195, 33, 4, 66, 248, 239, 254, 20, 69, 209, 84, 212, 158, 216, 206, 223, + 234, 219, 204, 146, 239, 91, 170, 111, 103, 245, 109, 101, 27, 219, 217, 193, 250, 219, 197, 110, 246, 176, 151, 125, + 236, 231, 0, 7, 57, 196, 97, 142, 112, 148, 99, 28, 231, 4, 39, 57, 197, 105, 206, 112, 150, 115, 156, 231, 2, 23, + 185, 196, 101, 174, 112, 149, 107, 92, 231, 6, 55, 185, 197, 109, 238, 112, 151, 123, 220, 231, 1, 15, 121, 196, 99, + 158, 240, 148, 103, 60, 231, 5, 47, 121, 197, 107, 222, 240, 150, 119, 188, 231, 3, 75, 124, 228, 83, 195, 142, 121, + 158, 125, 126, 225, 43, 223, 248, 206, 15, 126, 178, 204, 47, 86, 248, 237, 119, 43, 76, 127, 105, 47, 189, 165, 181, + 116, 150, 198, 234, 125, 117, 249, 47, 233, 41, 45, 165, 163, 52, 148, 126, 210, 78, 186, 73, 51, 233, 37, 173, 164, + 147, 52, 146, 62, 210, 70, 186, 72, 19, 233, 33, 45, 164, 131, 52, 144, 253, 23, 139, 218, 238, 217, 60, 123, 103, + 235, 236, 156, 141, 179, 239, 166, 93, 183, 237, 185, 107, 199, 125, 251, 29, 218, 237, 216, 94, 167, 118, 58, 183, + 207, 165, 93, 174, 237, 113, 107, 135, 123, 247, 47, 185, 251, 147, 59, 191, 184, 239, 155, 187, 126, 184, 103, 217, + 29, 235, 55, 171, 223, 173, 104, 184, 231, 255, 243, 7, 236, 52, 239, 128, 225, 3, 0, 0, ]); export const initialWitnessMap = new Map([ diff --git a/acvm-repo/blackbox_solver/Cargo.toml b/acvm-repo/blackbox_solver/Cargo.toml index 7359cf307e4..0794b2dbe7e 100644 --- a/acvm-repo/blackbox_solver/Cargo.toml +++ b/acvm-repo/blackbox_solver/Cargo.toml @@ -2,7 +2,7 @@ name = "acvm_blackbox_solver" description = "A solver for the blackbox functions found in ACIR and Brillig" # x-release-please-start-version -version = "0.39.0" +version = "0.40.0" # x-release-please-end authors.workspace = true edition.workspace = true @@ -18,7 +18,7 @@ thiserror.workspace = true blake2 = "0.10.6" blake3 = "1.5.0" -sha2 = "0.10.6" +sha2 = { version="0.10.6", features = ["compress",] } sha3 = "0.10.6" keccak = "0.1.4" 
k256 = { version = "0.11.0", features = [ diff --git a/acvm-repo/blackbox_solver/src/curve_specific_solver.rs b/acvm-repo/blackbox_solver/src/curve_specific_solver.rs index 2234710dec0..f0ab4561229 100644 --- a/acvm-repo/blackbox_solver/src/curve_specific_solver.rs +++ b/acvm-repo/blackbox_solver/src/curve_specific_solver.rs @@ -36,6 +36,11 @@ pub trait BlackBoxFunctionSolver { input2_x: &FieldElement, input2_y: &FieldElement, ) -> Result<(FieldElement, FieldElement), BlackBoxResolutionError>; + fn poseidon2_permutation( + &self, + _inputs: &[FieldElement], + _len: u32, + ) -> Result<Vec<FieldElement>, BlackBoxResolutionError>; } pub struct StubbedBlackBoxSolver; @@ -89,4 +94,11 @@ impl BlackBoxFunctionSolver for StubbedBlackBoxSolver { ) -> Result<(FieldElement, FieldElement), BlackBoxResolutionError> { Err(Self::fail(BlackBoxFunc::EmbeddedCurveAdd)) } + fn poseidon2_permutation( + &self, + _inputs: &[FieldElement], + _len: u32, + ) -> Result<Vec<FieldElement>, BlackBoxResolutionError> { + Err(Self::fail(BlackBoxFunc::Poseidon2Permutation)) + } } diff --git a/acvm-repo/blackbox_solver/src/lib.rs b/acvm-repo/blackbox_solver/src/lib.rs index afba4eff17c..e033344fefa 100644 --- a/acvm-repo/blackbox_solver/src/lib.rs +++ b/acvm-repo/blackbox_solver/src/lib.rs @@ -43,6 +43,16 @@ pub fn keccak256(inputs: &[u8]) -> Result<[u8; 32], BlackBoxResolutionError> { .map_err(|err| BlackBoxResolutionError::Failed(BlackBoxFunc::Keccak256, err)) } +pub fn sha256compression(state: &mut [u32; 8], msg_blocks: &[u32; 16]) { + let mut blocks = [0_u8; 64]; + for (i, block) in msg_blocks.iter().enumerate() { + let bytes = block.to_be_bytes(); + blocks[i * 4..i * 4 + 4].copy_from_slice(&bytes); + } + let blocks: GenericArray = blocks.into(); + sha2::compress256(state, &[blocks]); +} + const KECCAK_LANES: usize = 25; pub fn keccakf1600( diff --git a/acvm-repo/bn254_blackbox_solver/Cargo.toml b/acvm-repo/bn254_blackbox_solver/Cargo.toml index a73aded231f..ea601a6b80f 100644 --- a/acvm-repo/bn254_blackbox_solver/Cargo.toml +++ b/acvm-repo/bn254_blackbox_solver/Cargo.toml @@ -16,6 +16,7 @@ repository.workspace = true acir.workspace = true acvm_blackbox_solver.workspace = true thiserror.workspace = true +num-traits.workspace = true rust-embed = { version = "6.6.0", features = [ "debug-embed", @@ -23,8 +24,9 @@ rust-embed = { version = "6.6.0", features = [ "include-exclude", ] } -# BN254 fixed base scalar multiplication solver -grumpkin = { git = "https://github.com/noir-lang/grumpkin", rev = "56d99799381f79e42148aaef0de2b0cf9a4b9a5d", features = ["std"] } +grumpkin = { version = "0.1.0", package = "noir_grumpkin", features = [ + "std", +] } # BN254 fixed base scalar multiplication solver ark-ec = { version = "^0.4.0", default-features = false } ark-ff = { version = "^0.4.0", default-features = false } num-bigint.workspace = true diff --git a/acvm-repo/bn254_blackbox_solver/src/lib.rs b/acvm-repo/bn254_blackbox_solver/src/lib.rs index 13aa956f9e1..be0e60ada96 100644 --- a/acvm-repo/bn254_blackbox_solver/src/lib.rs +++ b/acvm-repo/bn254_blackbox_solver/src/lib.rs @@ -6,9 +6,11 @@ use acir::{BlackBoxFunc, FieldElement}; use acvm_blackbox_solver::{BlackBoxFunctionSolver, BlackBoxResolutionError}; mod fixed_base_scalar_mul; +mod poseidon2; mod wasm; pub use fixed_base_scalar_mul::{embedded_curve_add, fixed_base_scalar_mul}; +use poseidon2::Poseidon2; use wasm::Barretenberg; use self::wasm::{Pedersen, SchnorrSig}; @@ -97,4 +99,13 @@ impl BlackBoxFunctionSolver for Bn254BlackBoxSolver { ) -> Result<(FieldElement, FieldElement), BlackBoxResolutionError> {
embedded_curve_add(*input1_x, *input1_y, *input2_x, *input2_y) } + + fn poseidon2_permutation( + &self, + inputs: &[FieldElement], + len: u32, + ) -> Result, BlackBoxResolutionError> { + let poseidon = Poseidon2::new(); + poseidon.permutation(inputs, len) + } } diff --git a/acvm-repo/bn254_blackbox_solver/src/poseidon2.rs b/acvm-repo/bn254_blackbox_solver/src/poseidon2.rs new file mode 100644 index 00000000000..e0ed5bcd053 --- /dev/null +++ b/acvm-repo/bn254_blackbox_solver/src/poseidon2.rs @@ -0,0 +1,1043 @@ +use acir::FieldElement; +use acvm_blackbox_solver::BlackBoxResolutionError; +use num_bigint::BigUint; +use num_traits::Num; + +pub(crate) struct Poseidon2 { + t: u32, + rounds_f: u32, + rounds_p: u32, + internal_matrix_diagonal: [FieldElement; 4], + round_constant: [[FieldElement; 4]; 64], +} + +impl Poseidon2 { + pub(crate) fn new() -> Self { + Poseidon2 { + t: 4, + rounds_f: 8, + rounds_p: 56, + internal_matrix_diagonal: [ + Poseidon2::field_from_hex( + "0x10dc6e9c006ea38b04b1e03b4bd9490c0d03f98929ca1d7fb56821fd19d3b6e7", + ), + Poseidon2::field_from_hex( + "0x0c28145b6a44df3e0149b3d0a30b3bb599df9756d4dd9b84a86b38cfb45a740b", + ), + Poseidon2::field_from_hex( + "0x00544b8338791518b2c7645a50392798b21f75bb60e3596170067d00141cac15", + ), + Poseidon2::field_from_hex( + "0x222c01175718386f2e2e82eb122789e352e105a3b8fa852613bc534433ee428b", + ), + ], + round_constant: [ + [ + Poseidon2::field_from_hex( + "0x19b849f69450b06848da1d39bd5e4a4302bb86744edc26238b0878e269ed23e5", + ), + Poseidon2::field_from_hex( + "0x265ddfe127dd51bd7239347b758f0a1320eb2cc7450acc1dad47f80c8dcf34d6", + ), + Poseidon2::field_from_hex( + "0x199750ec472f1809e0f66a545e1e51624108ac845015c2aa3dfc36bab497d8aa", + ), + Poseidon2::field_from_hex( + "0x157ff3fe65ac7208110f06a5f74302b14d743ea25067f0ffd032f787c7f1cdf8", + ), + ], + [ + Poseidon2::field_from_hex( + "0x2e49c43c4569dd9c5fd35ac45fca33f10b15c590692f8beefe18f4896ac94902", + ), + Poseidon2::field_from_hex( + "0x0e35fb89981890520d4aef2b6d6506c3cb2f0b6973c24fa82731345ffa2d1f1e", + ), + Poseidon2::field_from_hex( + "0x251ad47cb15c4f1105f109ae5e944f1ba9d9e7806d667ffec6fe723002e0b996", + ), + Poseidon2::field_from_hex( + "0x13da07dc64d428369873e97160234641f8beb56fdd05e5f3563fa39d9c22df4e", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0c009b84e650e6d23dc00c7dccef7483a553939689d350cd46e7b89055fd4738", + ), + Poseidon2::field_from_hex( + "0x011f16b1c63a854f01992e3956f42d8b04eb650c6d535eb0203dec74befdca06", + ), + Poseidon2::field_from_hex( + "0x0ed69e5e383a688f209d9a561daa79612f3f78d0467ad45485df07093f367549", + ), + Poseidon2::field_from_hex( + "0x04dba94a7b0ce9e221acad41472b6bbe3aec507f5eb3d33f463672264c9f789b", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0a3f2637d840f3a16eb094271c9d237b6036757d4bb50bf7ce732ff1d4fa28e8", + ), + Poseidon2::field_from_hex( + "0x259a666f129eea198f8a1c502fdb38fa39b1f075569564b6e54a485d1182323f", + ), + Poseidon2::field_from_hex( + "0x28bf7459c9b2f4c6d8e7d06a4ee3a47f7745d4271038e5157a32fdf7ede0d6a1", + ), + Poseidon2::field_from_hex( + "0x0a1ca941f057037526ea200f489be8d4c37c85bbcce6a2aeec91bd6941432447", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0c6f8f958be0e93053d7fd4fc54512855535ed1539f051dcb43a26fd926361cf", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), 
+ ], + [ + Poseidon2::field_from_hex( + "0x123106a93cd17578d426e8128ac9d90aa9e8a00708e296e084dd57e69caaf811", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x26e1ba52ad9285d97dd3ab52f8e840085e8fa83ff1e8f1877b074867cd2dee75", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x1cb55cad7bd133de18a64c5c47b9c97cbe4d8b7bf9e095864471537e6a4ae2c5", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x1dcd73e46acd8f8e0e2c7ce04bde7f6d2a53043d5060a41c7143f08e6e9055d0", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x011003e32f6d9c66f5852f05474a4def0cda294a0eb4e9b9b12b9bb4512e5574", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x2b1e809ac1d10ab29ad5f20d03a57dfebadfe5903f58bafed7c508dd2287ae8c", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x2539de1785b735999fb4dac35ee17ed0ef995d05ab2fc5faeaa69ae87bcec0a5", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0c246c5a2ef8ee0126497f222b3e0a0ef4e1c3d41c86d46e43982cb11d77951d", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + 
"0x192089c4974f68e95408148f7c0632edbb09e6a6ad1a1c2f3f0305f5d03b527b", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x1eae0ad8ab68b2f06a0ee36eeb0d0c058529097d91096b756d8fdc2fb5a60d85", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x179190e5d0e22179e46f8282872abc88db6e2fdc0dee99e69768bd98c5d06bfb", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x29bb9e2c9076732576e9a81c7ac4b83214528f7db00f31bf6cafe794a9b3cd1c", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x225d394e42207599403efd0c2464a90d52652645882aac35b10e590e6e691e08", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x064760623c25c8cf753d238055b444532be13557451c087de09efd454b23fd59", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x10ba3a0e01df92e87f301c4b716d8a394d67f4bf42a75c10922910a78f6b5b87", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0e070bf53f8451b24f9c6e96b0c2a801cb511bc0c242eb9d361b77693f21471c", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x1b94cd61b051b04dd39755ff93821a73ccd6cb11d2491d8aa7f921014de252fb", + ), + 
Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x1d7cb39bafb8c744e148787a2e70230f9d4e917d5713bb050487b5aa7d74070b", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x2ec93189bd1ab4f69117d0fe980c80ff8785c2961829f701bb74ac1f303b17db", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x2db366bfdd36d277a692bb825b86275beac404a19ae07a9082ea46bd83517926", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x062100eb485db06269655cf186a68532985275428450359adc99cec6960711b8", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0761d33c66614aaa570e7f1e8244ca1120243f92fa59e4f900c567bf41f5a59b", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x20fc411a114d13992c2705aa034e3f315d78608a0f7de4ccf7a72e494855ad0d", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x25b5c004a4bdfcb5add9ec4e9ab219ba102c67e8b3effb5fc3a30f317250bc5a", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x23b1822d278ed632a494e58f6df6f5ed038b186d8474155ad87e7dff62b37f4b", + ), + Poseidon2::field_from_hex( + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x22734b4c5c3f9493606c4ba9012499bf0f14d13bfcfcccaa16102a29cc2f69e0", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x26c0c8fe09eb30b7e27a74dc33492347e5bdff409aa3610254413d3fad795ce5", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x070dd0ccb6bd7bbae88eac03fa1fbb26196be3083a809829bbd626df348ccad9", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x12b6595bdb329b6fb043ba78bb28c3bec2c0a6de46d8c5ad6067c4ebfd4250da", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x248d97d7f76283d63bec30e7a5876c11c06fca9b275c671c5e33d95bb7e8d729", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x1a306d439d463b0816fc6fd64cc939318b45eb759ddde4aa106d15d9bd9baaaa", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x28a8f8372e3c38daced7c00421cb4621f4f1b54ddc27821b0d62d3d6ec7c56cf", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0094975717f9a8a8bb35152f24d43294071ce320c829f388bc852183e1e2ce7e", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + 
Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x04d5ee4c3aa78f7d80fde60d716480d3593f74d4f653ae83f4103246db2e8d65", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x2a6cf5e9aa03d4336349ad6fb8ed2269c7bef54b8822cc76d08495c12efde187", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x2304d31eaab960ba9274da43e19ddeb7f792180808fd6e43baae48d7efcba3f3", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x03fd9ac865a4b2a6d5e7009785817249bff08a7e0726fcb4e1c11d39d199f0b0", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x00b7258ded52bbda2248404d55ee5044798afc3a209193073f7954d4d63b0b64", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x159f81ada0771799ec38fca2d4bf65ebb13d3a74f3298db36272c5ca65e92d9a", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x1ef90e67437fbc8550237a75bc28e3bb9000130ea25f0c5471e144cf4264431f", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x1e65f838515e5ff0196b49aa41a2d2568df739bc176b08ec95a79ed82932e30d", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x2b1b045def3a166cec6ce768d079ba74b18c844e570e1f826575c1068c94c33f", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0832e5753ceb0ff6402543b1109229c165dc2d73bef715e3f1c6e07c168bb173", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x02f614e9cedfb3dc6b762ae0a37d41bab1b841c2e8b6451bc5a8e3c390b6ad16", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0e2427d38bd46a60dd640b8e362cad967370ebb777bedff40f6a0be27e7ed705", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0493630b7c670b6deb7c84d414e7ce79049f0ec098c3c7c50768bbe29214a53a", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x22ead100e8e482674decdab17066c5a26bb1515355d5461a3dc06cc85327cea9", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x25b3e56e655b42cdaae2626ed2554d48583f1ae35626d04de5084e0b6d2a6f16", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x1e32752ada8836ef5837a6cde8ff13dbb599c336349e4c584b4fdc0a0cf6f9d0", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + 
Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x2fa2a871c15a387cc50f68f6f3c3455b23c00995f05078f672a9864074d412e5", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x2f569b8a9a4424c9278e1db7311e889f54ccbf10661bab7fcd18e7c7a7d83505", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x044cb455110a8fdd531ade530234c518a7df93f7332ffd2144165374b246b43d", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x227808de93906d5d420246157f2e42b191fe8c90adfe118178ddc723a5319025", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x02fcca2934e046bc623adead873579865d03781ae090ad4a8579d2e7a6800355", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0ef915f0ac120b876abccceb344a1d36bad3f3c5ab91a8ddcbec2e060d8befac", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + Poseidon2::field_from_hex( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ], + [ + Poseidon2::field_from_hex( + "0x1797130f4b7a3e1777eb757bc6f287f6ab0fb85f6be63b09f3b16ef2b1405d38", + ), + Poseidon2::field_from_hex( + "0x0a76225dc04170ae3306c85abab59e608c7f497c20156d4d36c668555decc6e5", + ), + Poseidon2::field_from_hex( + "0x1fffb9ec1992d66ba1e77a7b93209af6f8fa76d48acb664796174b5326a31a5c", + ), + Poseidon2::field_from_hex( + "0x25721c4fc15a3f2853b57c338fa538d85f8fbba6c6b9c6090611889b797b9c5f", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0c817fd42d5f7a41215e3d07ba197216adb4c3790705da95eb63b982bfcaf75a", + ), + Poseidon2::field_from_hex( + "0x13abe3f5239915d39f7e13c2c24970b6df8cf86ce00a22002bc15866e52b5a96", + ), + Poseidon2::field_from_hex( + "0x2106feea546224ea12ef7f39987a46c85c1bc3dc29bdbd7a92cd60acb4d391ce", + ), + Poseidon2::field_from_hex( + 
"0x21ca859468a746b6aaa79474a37dab49f1ca5a28c748bc7157e1b3345bb0f959", + ), + ], + [ + Poseidon2::field_from_hex( + "0x05ccd6255c1e6f0c5cf1f0df934194c62911d14d0321662a8f1a48999e34185b", + ), + Poseidon2::field_from_hex( + "0x0f0e34a64b70a626e464d846674c4c8816c4fb267fe44fe6ea28678cb09490a4", + ), + Poseidon2::field_from_hex( + "0x0558531a4e25470c6157794ca36d0e9647dbfcfe350d64838f5b1a8a2de0d4bf", + ), + Poseidon2::field_from_hex( + "0x09d3dca9173ed2faceea125157683d18924cadad3f655a60b72f5864961f1455", + ), + ], + [ + Poseidon2::field_from_hex( + "0x0328cbd54e8c0913493f866ed03d218bf23f92d68aaec48617d4c722e5bd4335", + ), + Poseidon2::field_from_hex( + "0x2bf07216e2aff0a223a487b1a7094e07e79e7bcc9798c648ee3347dd5329d34b", + ), + Poseidon2::field_from_hex( + "0x1daf345a58006b736499c583cb76c316d6f78ed6a6dffc82111e11a63fe412df", + ), + Poseidon2::field_from_hex( + "0x176563472456aaa746b694c60e1823611ef39039b2edc7ff391e6f2293d2c404", + ), + ], + ], + } + } + fn field_from_hex(hex: &str) -> FieldElement { + let bigint = BigUint::from_str_radix(hex.strip_prefix("0x").unwrap(), 16).unwrap(); + FieldElement::from_be_bytes_reduce(&bigint.to_bytes_be()) + } + + fn single_box(x: FieldElement) -> FieldElement { + let s = x * x; + s * s * x + } + + fn s_box(input: &mut [FieldElement]) { + for i in input { + *i = Self::single_box(*i); + } + } + + fn add_round_constants(&self, state: &mut [FieldElement], round: usize) { + for (state_element, constant_element) in state.iter_mut().zip(self.round_constant[round]) { + *state_element += constant_element; + } + } + + /// Algorithm is taken directly from the Poseidon2 implementation in Barretenberg crypto module. + fn matrix_multiplication_4x4(input: &mut [FieldElement]) { + assert!(input.len() == 4); + let t0 = input[0] + input[1]; // A + B + let t1 = input[2] + input[3]; // C + D + let mut t2 = input[1] + input[1]; // 2B + t2 += t1; // 2B + C + D + let mut t3 = input[3] + input[3]; // 2D + t3 += t0; // 2D + A + B + let mut t4 = t1 + t1; + t4 += t4; + t4 += t3; // A + B + 4C + 6D + let mut t5 = t0 + t0; + t5 += t5; + t5 += t2; // 4A + 6B + C + D + let t6 = t3 + t5; // 5A + 7B + C + 3D + let t7 = t2 + t4; // A + 3B + 5C + 7D + input[0] = t6; + input[1] = t5; + input[2] = t7; + input[3] = t4; + } + + fn internal_m_multiplication(&self, input: &mut [FieldElement]) { + let mut sum = FieldElement::zero(); + for i in input.iter() { + sum += *i; + } + for (index, i) in input.iter_mut().enumerate() { + *i = *i * self.internal_matrix_diagonal[index]; + *i += sum; + } + } + + pub(crate) fn permutation( + &self, + inputs: &[FieldElement], + len: u32, + ) -> Result, BlackBoxResolutionError> { + if len as usize != inputs.len() { + return Err(BlackBoxResolutionError::Failed( + acir::BlackBoxFunc::Poseidon2Permutation, + format!( + "the number of inputs does not match specified length. 
{} > {}", + inputs.len(), + len + ), + )); + } + if len != self.t { + return Err(BlackBoxResolutionError::Failed( + acir::BlackBoxFunc::Poseidon2Permutation, + format!("Expected {} values but encountered {}", self.t, len), + )); + } + // Read witness assignments + let mut state = [FieldElement::zero(); 4]; + for (index, input) in inputs.iter().enumerate() { + state[index] = *input; + } + // Apply 1st linear layer + Self::matrix_multiplication_4x4(&mut state); + + // First set of external rounds + let rf_first = self.rounds_f / 2; + for r in 0..rf_first { + self.add_round_constants(&mut state, r as usize); + Self::s_box(&mut state); + Self::matrix_multiplication_4x4(&mut state); + } + // Internal rounds + let p_end = rf_first + self.rounds_p; + for r in rf_first..p_end { + state[0] += self.round_constant[r as usize][0]; + state[0] = Self::single_box(state[0]); + self.internal_m_multiplication(&mut state); + } + + // Remaining external rounds + let num_rounds = self.rounds_f + self.rounds_p; + for i in p_end..num_rounds { + self.add_round_constants(&mut state, i as usize); + Self::s_box(&mut state); + Self::matrix_multiplication_4x4(&mut state); + } + Ok(state.into()) + } +} diff --git a/acvm-repo/brillig/Cargo.toml b/acvm-repo/brillig/Cargo.toml index b9cedfe8d60..8d91d19e117 100644 --- a/acvm-repo/brillig/Cargo.toml +++ b/acvm-repo/brillig/Cargo.toml @@ -2,7 +2,7 @@ name = "brillig" description = "Brillig is the bytecode ACIR uses for non-determinism." # x-release-please-start-version -version = "0.39.0" +version = "0.40.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/brillig/src/black_box.rs b/acvm-repo/brillig/src/black_box.rs index 22fac6f3ba3..29861d0fd84 100644 --- a/acvm-repo/brillig/src/black_box.rs +++ b/acvm-repo/brillig/src/black_box.rs @@ -1,4 +1,4 @@ -use crate::{opcodes::HeapVector, HeapArray, RegisterIndex}; +use crate::{opcodes::HeapVector, HeapArray, MemoryAddress}; use serde::{Deserialize, Serialize}; /// These opcodes provide an equivalent of ACIR blackbox functions. @@ -36,7 +36,7 @@ pub enum BlackBoxOp { public_key_x: HeapArray, public_key_y: HeapArray, signature: HeapArray, - result: RegisterIndex, + result: MemoryAddress, }, /// Verifies a ECDSA signature over the secp256r1 curve. EcdsaSecp256r1 { @@ -44,75 +44,75 @@ pub enum BlackBoxOp { public_key_x: HeapArray, public_key_y: HeapArray, signature: HeapArray, - result: RegisterIndex, + result: MemoryAddress, }, /// Verifies a Schnorr signature over a curve which is "pairing friendly" with the curve on which the Brillig bytecode is defined. SchnorrVerify { - public_key_x: RegisterIndex, - public_key_y: RegisterIndex, + public_key_x: MemoryAddress, + public_key_y: MemoryAddress, message: HeapVector, signature: HeapVector, - result: RegisterIndex, + result: MemoryAddress, }, /// Calculates a Pedersen commitment to the inputs. PedersenCommitment { inputs: HeapVector, - domain_separator: RegisterIndex, + domain_separator: MemoryAddress, output: HeapArray, }, /// Calculates a Pedersen hash to the inputs. PedersenHash { inputs: HeapVector, - domain_separator: RegisterIndex, - output: RegisterIndex, + domain_separator: MemoryAddress, + output: MemoryAddress, }, /// Performs scalar multiplication over the embedded curve. FixedBaseScalarMul { - low: RegisterIndex, - high: RegisterIndex, + low: MemoryAddress, + high: MemoryAddress, result: HeapArray, }, /// Performs addition over the embedded curve. 
EmbeddedCurveAdd { - input1_x: RegisterIndex, - input1_y: RegisterIndex, - input2_x: RegisterIndex, - input2_y: RegisterIndex, + input1_x: MemoryAddress, + input1_y: MemoryAddress, + input2_x: MemoryAddress, + input2_y: MemoryAddress, result: HeapArray, }, BigIntAdd { - lhs: RegisterIndex, - rhs: RegisterIndex, - output: RegisterIndex, + lhs: MemoryAddress, + rhs: MemoryAddress, + output: MemoryAddress, }, - BigIntNeg { - lhs: RegisterIndex, - rhs: RegisterIndex, - output: RegisterIndex, + BigIntSub { + lhs: MemoryAddress, + rhs: MemoryAddress, + output: MemoryAddress, }, BigIntMul { - lhs: RegisterIndex, - rhs: RegisterIndex, - output: RegisterIndex, + lhs: MemoryAddress, + rhs: MemoryAddress, + output: MemoryAddress, }, BigIntDiv { - lhs: RegisterIndex, - rhs: RegisterIndex, - output: RegisterIndex, + lhs: MemoryAddress, + rhs: MemoryAddress, + output: MemoryAddress, }, BigIntFromLeBytes { inputs: HeapVector, modulus: HeapVector, - output: RegisterIndex, + output: MemoryAddress, }, BigIntToLeBytes { - input: RegisterIndex, + input: MemoryAddress, output: HeapVector, }, Poseidon2Permutation { message: HeapVector, output: HeapArray, - len: RegisterIndex, + len: MemoryAddress, }, Sha256Compression { input: HeapVector, diff --git a/acvm-repo/brillig/src/foreign_call.rs b/acvm-repo/brillig/src/foreign_call.rs index 1359d7d604d..3f124a9a0a7 100644 --- a/acvm-repo/brillig/src/foreign_call.rs +++ b/acvm-repo/brillig/src/foreign_call.rs @@ -37,7 +37,7 @@ impl ForeignCallParam { } /// Represents the full output of a [foreign call][crate::Opcode::ForeignCall]. -#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone, Default)] pub struct ForeignCallResult { /// Resolved output values of the foreign call. pub values: Vec, diff --git a/acvm-repo/brillig/src/lib.rs b/acvm-repo/brillig/src/lib.rs index 5e033e3c792..0661e794360 100644 --- a/acvm-repo/brillig/src/lib.rs +++ b/acvm-repo/brillig/src/lib.rs @@ -18,7 +18,7 @@ mod value; pub use black_box::BlackBoxOp; pub use foreign_call::{ForeignCallParam, ForeignCallResult}; pub use opcodes::{ - BinaryFieldOp, BinaryIntOp, HeapArray, HeapVector, RegisterIndex, RegisterOrMemory, + BinaryFieldOp, BinaryIntOp, HeapArray, HeapValueType, HeapVector, MemoryAddress, ValueOrArray, }; pub use opcodes::{BrilligOpcode as Opcode, Label}; pub use value::Typ; diff --git a/acvm-repo/brillig/src/opcodes.rs b/acvm-repo/brillig/src/opcodes.rs index 79295cc6e5d..51df1f90941 100644 --- a/acvm-repo/brillig/src/opcodes.rs +++ b/acvm-repo/brillig/src/opcodes.rs @@ -4,100 +4,133 @@ use serde::{Deserialize, Serialize}; pub type Label = usize; #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -pub struct RegisterIndex(pub usize); +pub struct MemoryAddress(pub usize); -/// `RegisterIndex` refers to the index in VM register space. -impl RegisterIndex { +/// `MemoryAddress` refers to the index in VM memory. +impl MemoryAddress { pub fn to_usize(self) -> usize { self.0 } } -impl From for RegisterIndex { +impl From for MemoryAddress { fn from(value: usize) -> Self { - RegisterIndex(value) + MemoryAddress(value) } } -/// A fixed-sized array starting from a Brillig register memory location. 
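A minimal sketch of the `MemoryAddress` newtype introduced here, using the `acir::brillig` re-export that the rest of this diff relies on (illustrative snippet, e.g. inside a test; not part of the patch itself):

```rust
use acir::brillig::MemoryAddress;

// MemoryAddress is a thin wrapper over a usize index into VM memory.
let addr: MemoryAddress = 5.into(); // via the From<usize> impl above
assert_eq!(addr.to_usize(), 5);     // back to a raw index
assert_eq!(addr, MemoryAddress(5)); // the tuple field is public, so direct construction also works
```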
+/// Describes the memory layout for an array/vector element +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +pub enum HeapValueType { + // A single field element is enough to represent the value + Simple, + // The value read should be interpreted as a pointer to a heap array, which + // consists of a pointer to a slice of memory of size elements, and a + // reference count + Array { value_types: Vec, size: usize }, + // The value read should be interpreted as a pointer to a heap vector, which + // consists of a pointer to a slice of memory, a number of elements in that + // slice, and a reference count + Vector { value_types: Vec }, +} + +impl HeapValueType { + pub fn all_simple(types: &[HeapValueType]) -> bool { + types.iter().all(|typ| matches!(typ, HeapValueType::Simple)) + } +} + +/// A fixed-sized array starting from a Brillig memory location. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] pub struct HeapArray { - pub pointer: RegisterIndex, + pub pointer: MemoryAddress, pub size: usize, } -/// A register-sized vector passed starting from a Brillig register memory location and with a register-held size +/// A memory-sized vector passed starting from a Brillig memory location and with a memory-held size #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] pub struct HeapVector { - pub pointer: RegisterIndex, - pub size: RegisterIndex, + pub pointer: MemoryAddress, + pub size: MemoryAddress, } /// Lays out various ways an external foreign call's input and output data may be interpreted inside Brillig. -/// This data can either be an individual register value or memory. +/// This data can either be an individual value or memory. /// /// While we are usually agnostic to how memory is passed within Brillig, /// this needs to be encoded somehow when dealing with an external system. /// For simplicity, the extra type information is given right in the ForeignCall instructions. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] -pub enum RegisterOrMemory { - /// A single register value passed to or from an external call - /// It is an 'immediate' value - used without dereferencing memory. - /// For a foreign call input, the value is read directly from the register. - /// For a foreign call output, the value is written directly to the register. - RegisterIndex(RegisterIndex), +pub enum ValueOrArray { + /// A single value passed to or from an external call + /// It is an 'immediate' value - used without dereferencing. + /// For a foreign call input, the value is read directly from memory. + /// For a foreign call output, the value is written directly to memory. + MemoryAddress(MemoryAddress), /// An array passed to or from an external call /// In the case of a foreign call input, the array is read from this Brillig memory location + usize more cells. /// In the case of a foreign call output, the array is written to this Brillig memory location with the usize being here just as a sanity check for the size write. HeapArray(HeapArray), /// A vector passed to or from an external call - /// In the case of a foreign call input, the vector is read from this Brillig memory location + as many cells as the 2nd register indicates. - /// In the case of a foreign call output, the vector is written to this Brillig memory location and as 'size' cells, with size being stored in the second register. + /// In the case of a foreign call input, the vector is read from this Brillig memory location + as many cells as the 2nd address indicates. 
+ /// In the case of a foreign call output, the vector is written to this Brillig memory location and as 'size' cells, with size being stored in the second address. HeapVector(HeapVector), } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum BrilligOpcode { - /// Takes the fields in registers `lhs` and `rhs` + /// Takes the fields in addresses `lhs` and `rhs` /// Performs the specified binary operation - /// and stores the value in the `result` register. + /// and stores the value in the `result` address. BinaryFieldOp { - destination: RegisterIndex, + destination: MemoryAddress, op: BinaryFieldOp, - lhs: RegisterIndex, - rhs: RegisterIndex, + lhs: MemoryAddress, + rhs: MemoryAddress, }, - /// Takes the `bit_size` size integers in registers `lhs` and `rhs` + /// Takes the `bit_size` size integers in addresses `lhs` and `rhs` /// Performs the specified binary operation - /// and stores the value in the `result` register. + /// and stores the value in the `result` address. BinaryIntOp { - destination: RegisterIndex, + destination: MemoryAddress, op: BinaryIntOp, bit_size: u32, - lhs: RegisterIndex, - rhs: RegisterIndex, + lhs: MemoryAddress, + rhs: MemoryAddress, + }, + Cast { + destination: MemoryAddress, + source: MemoryAddress, + bit_size: u32, }, JumpIfNot { - condition: RegisterIndex, + condition: MemoryAddress, location: Label, }, /// Sets the program counter to the value located at `destination` /// If the value at `condition` is non-zero JumpIf { - condition: RegisterIndex, + condition: MemoryAddress, location: Label, }, /// Sets the program counter to the label. Jump { location: Label, }, + /// Copies calldata after the offset to the specified address and length + CalldataCopy { + destination_address: MemoryAddress, + size: usize, + offset: usize, + }, /// We don't support dynamic jumps or calls /// See https://github.com/ethereum/aleth/issues/3404 for reasoning Call { location: Label, }, Const { - destination: RegisterIndex, + destination: MemoryAddress, + bit_size: u32, value: Value, }, Return, @@ -109,28 +142,36 @@ pub enum BrilligOpcode { /// Interpreted by caller context, ie this will have different meanings depending on /// who the caller is. function: String, - /// Destination registers (may be single values or memory pointers). - destinations: Vec, - /// Input registers (may be single values or memory pointers). - inputs: Vec, + /// Destination addresses (may be single values or memory pointers). + destinations: Vec, + /// Destination value types + destination_value_types: Vec, + /// Input addresses (may be single values or memory pointers). 
+ inputs: Vec, + /// Input value types (for heap allocated structures indicates how to + /// retrieve the elements) + input_value_types: Vec, }, Mov { - destination: RegisterIndex, - source: RegisterIndex, + destination: MemoryAddress, + source: MemoryAddress, }, Load { - destination: RegisterIndex, - source_pointer: RegisterIndex, + destination: MemoryAddress, + source_pointer: MemoryAddress, }, Store { - destination_pointer: RegisterIndex, - source: RegisterIndex, + destination_pointer: MemoryAddress, + source: MemoryAddress, }, BlackBox(BlackBoxOp), /// Used to denote execution failure Trap, - /// Stop execution - Stop, + /// Stop execution, returning data after the offset + Stop { + return_data_offset: usize, + return_data_size: usize, + }, } /// Binary fixed-length field expressions diff --git a/acvm-repo/brillig/src/value.rs b/acvm-repo/brillig/src/value.rs index 73a7d897eb7..5a532cbc1a7 100644 --- a/acvm-repo/brillig/src/value.rs +++ b/acvm-repo/brillig/src/value.rs @@ -37,8 +37,8 @@ impl Value { /// Panics: If `Value` cannot fit into a u64 or `Value` does //// not fit into a usize. pub fn to_usize(&self) -> usize { - usize::try_from(self.inner.try_to_u64().expect("register does not fit into u64")) - .expect("register does not fit into usize") + usize::try_from(self.inner.try_to_u64().expect("value does not fit into u64")) + .expect("value does not fit into usize") } } diff --git a/acvm-repo/brillig_vm/Cargo.toml b/acvm-repo/brillig_vm/Cargo.toml index 5a8a34be881..272e8389413 100644 --- a/acvm-repo/brillig_vm/Cargo.toml +++ b/acvm-repo/brillig_vm/Cargo.toml @@ -2,7 +2,7 @@ name = "brillig_vm" description = "The virtual machine that processes Brillig bytecode, used to introduce non-determinism to the ACVM" # x-release-please-start-version -version = "0.39.0" +version = "0.40.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/brillig_vm/src/arithmetic.rs b/acvm-repo/brillig_vm/src/arithmetic.rs index 263a733e3c4..9d7b6fe8f02 100644 --- a/acvm-repo/brillig_vm/src/arithmetic.rs +++ b/acvm-repo/brillig_vm/src/arithmetic.rs @@ -36,18 +36,20 @@ pub(crate) fn evaluate_binary_bigint_op( BinaryIntOp::UnsignedDiv => { let b_mod = b % bit_modulo; if b_mod.is_zero() { - return Err("Division by zero".to_owned()); + BigUint::zero() + } else { + (a % bit_modulo) / b_mod } - (a % bit_modulo) / b_mod } // Perform signed division by first converting a and b to signed integers and then back to unsigned after the operation. 
BinaryIntOp::SignedDiv => { let b_signed = to_big_signed(b, bit_size); if b_signed.is_zero() { - return Err("Division by zero".to_owned()); + BigUint::zero() + } else { + let signed_div = to_big_signed(a, bit_size) / b_signed; + to_big_unsigned(signed_div, bit_size) } - let signed_div = to_big_signed(a, bit_size) / b_signed; - to_big_unsigned(signed_div, bit_size) } // Perform a == operation, returning 0 or 1 BinaryIntOp::Equals => { diff --git a/acvm-repo/brillig_vm/src/black_box.rs b/acvm-repo/brillig_vm/src/black_box.rs index e9c25200c47..73b57b907f3 100644 --- a/acvm-repo/brillig_vm/src/black_box.rs +++ b/acvm-repo/brillig_vm/src/black_box.rs @@ -2,26 +2,17 @@ use acir::brillig::{BlackBoxOp, HeapArray, HeapVector, Value}; use acir::{BlackBoxFunc, FieldElement}; use acvm_blackbox_solver::{ blake2s, blake3, ecdsa_secp256k1_verify, ecdsa_secp256r1_verify, keccak256, keccakf1600, - sha256, BlackBoxFunctionSolver, BlackBoxResolutionError, + sha256, sha256compression, BlackBoxFunctionSolver, BlackBoxResolutionError, }; -use crate::{Memory, Registers}; +use crate::Memory; -fn read_heap_vector<'a>( - memory: &'a Memory, - registers: &Registers, - vector: &HeapVector, -) -> &'a [Value] { - memory - .read_slice(registers.get(vector.pointer).to_usize(), registers.get(vector.size).to_usize()) +fn read_heap_vector<'a>(memory: &'a Memory, vector: &HeapVector) -> &'a [Value] { + memory.read_slice(memory.read_ref(vector.pointer), memory.read(vector.size).to_usize()) } -fn read_heap_array<'a>( - memory: &'a Memory, - registers: &Registers, - array: &HeapArray, -) -> &'a [Value] { - memory.read_slice(registers.get(array.pointer).to_usize(), array.size) +fn read_heap_array<'a>(memory: &'a Memory, array: &HeapArray) -> &'a [Value] { + memory.read_slice(memory.read_ref(array.pointer), array.size) } /// Extracts the last byte of every value @@ -42,36 +33,35 @@ fn to_value_vec(input: &[u8]) -> Vec { pub(crate) fn evaluate_black_box( op: &BlackBoxOp, solver: &Solver, - registers: &mut Registers, memory: &mut Memory, ) -> Result<(), BlackBoxResolutionError> { match op { BlackBoxOp::Sha256 { message, output } => { - let message = to_u8_vec(read_heap_vector(memory, registers, message)); + let message = to_u8_vec(read_heap_vector(memory, message)); let bytes = sha256(message.as_slice())?; - memory.write_slice(registers.get(output.pointer).to_usize(), &to_value_vec(&bytes)); + memory.write_slice(memory.read_ref(output.pointer), &to_value_vec(&bytes)); Ok(()) } BlackBoxOp::Blake2s { message, output } => { - let message = to_u8_vec(read_heap_vector(memory, registers, message)); + let message = to_u8_vec(read_heap_vector(memory, message)); let bytes = blake2s(message.as_slice())?; - memory.write_slice(registers.get(output.pointer).to_usize(), &to_value_vec(&bytes)); + memory.write_slice(memory.read_ref(output.pointer), &to_value_vec(&bytes)); Ok(()) } BlackBoxOp::Blake3 { message, output } => { - let message = to_u8_vec(read_heap_vector(memory, registers, message)); + let message = to_u8_vec(read_heap_vector(memory, message)); let bytes = blake3(message.as_slice())?; - memory.write_slice(registers.get(output.pointer).to_usize(), &to_value_vec(&bytes)); + memory.write_slice(memory.read_ref(output.pointer), &to_value_vec(&bytes)); Ok(()) } BlackBoxOp::Keccak256 { message, output } => { - let message = to_u8_vec(read_heap_vector(memory, registers, message)); + let message = to_u8_vec(read_heap_vector(memory, message)); let bytes = keccak256(message.as_slice())?; - 
memory.write_slice(registers.get(output.pointer).to_usize(), &to_value_vec(&bytes)); + memory.write_slice(memory.read_ref(output.pointer), &to_value_vec(&bytes)); Ok(()) } BlackBoxOp::Keccakf1600 { message, output } => { - let state_vec: Vec = read_heap_vector(memory, registers, message) + let state_vec: Vec = read_heap_vector(memory, message) .iter() .map(|value| value.to_field().try_to_u64().unwrap()) .collect(); @@ -81,7 +71,7 @@ pub(crate) fn evaluate_black_box( let new_state: Vec = new_state.into_iter().map(|x| Value::from(x as usize)).collect(); - memory.write_slice(registers.get(output.pointer).to_usize(), &new_state); + memory.write_slice(memory.read_ref(output.pointer), &new_state); Ok(()) } BlackBoxOp::EcdsaSecp256k1 { @@ -89,42 +79,37 @@ pub(crate) fn evaluate_black_box( public_key_x, public_key_y, signature, - result: result_register, + result: result_address, } | BlackBoxOp::EcdsaSecp256r1 { hashed_msg, public_key_x, public_key_y, signature, - result: result_register, + result: result_address, } => { let bb_func = black_box_function_from_op(op); - let public_key_x: [u8; 32] = to_u8_vec(read_heap_array( - memory, - registers, - public_key_x, - )) - .try_into() - .map_err(|_| { - BlackBoxResolutionError::Failed(bb_func, "Invalid public key x length".to_string()) - })?; - let public_key_y: [u8; 32] = to_u8_vec(read_heap_array( - memory, - registers, - public_key_y, - )) - .try_into() - .map_err(|_| { - BlackBoxResolutionError::Failed(bb_func, "Invalid public key y length".to_string()) - })?; - let signature: [u8; 64] = to_u8_vec(read_heap_array(memory, registers, signature)) - .try_into() - .map_err(|_| { + let public_key_x: [u8; 32] = + to_u8_vec(read_heap_array(memory, public_key_x)).try_into().map_err(|_| { + BlackBoxResolutionError::Failed( + bb_func, + "Invalid public key x length".to_string(), + ) + })?; + let public_key_y: [u8; 32] = + to_u8_vec(read_heap_array(memory, public_key_y)).try_into().map_err(|_| { + BlackBoxResolutionError::Failed( + bb_func, + "Invalid public key y length".to_string(), + ) + })?; + let signature: [u8; 64] = + to_u8_vec(read_heap_array(memory, signature)).try_into().map_err(|_| { BlackBoxResolutionError::Failed(bb_func, "Invalid signature length".to_string()) })?; - let hashed_msg = to_u8_vec(read_heap_vector(memory, registers, hashed_msg)); + let hashed_msg = to_u8_vec(read_heap_vector(memory, hashed_msg)); let result = match op { BlackBoxOp::EcdsaSecp256k1 { .. 
} => { @@ -136,71 +121,111 @@ pub(crate) fn evaluate_black_box( _ => unreachable!("`BlackBoxOp` is guarded against being a non-ecdsa operation"), }; - registers.set(*result_register, result.into()); + memory.write(*result_address, result.into()); Ok(()) } BlackBoxOp::SchnorrVerify { public_key_x, public_key_y, message, signature, result } => { - let public_key_x = registers.get(*public_key_x).to_field(); - let public_key_y = registers.get(*public_key_y).to_field(); - let message: Vec = to_u8_vec(read_heap_vector(memory, registers, message)); - let signature: Vec = to_u8_vec(read_heap_vector(memory, registers, signature)); + let public_key_x = memory.read(*public_key_x).to_field(); + let public_key_y = memory.read(*public_key_y).to_field(); + let message: Vec = to_u8_vec(read_heap_vector(memory, message)); + let signature: Vec = to_u8_vec(read_heap_vector(memory, signature)); let verified = solver.schnorr_verify(&public_key_x, &public_key_y, &signature, &message)?; - registers.set(*result, verified.into()); + memory.write(*result, verified.into()); Ok(()) } BlackBoxOp::FixedBaseScalarMul { low, high, result } => { - let low = registers.get(*low).to_field(); - let high = registers.get(*high).to_field(); + let low = memory.read(*low).to_field(); + let high = memory.read(*high).to_field(); let (x, y) = solver.fixed_base_scalar_mul(&low, &high)?; - memory.write_slice(registers.get(result.pointer).to_usize(), &[x.into(), y.into()]); + memory.write_slice(memory.read_ref(result.pointer), &[x.into(), y.into()]); Ok(()) } BlackBoxOp::EmbeddedCurveAdd { input1_x, input1_y, input2_x, input2_y, result } => { - let input1_x = registers.get(*input1_x).to_field(); - let input1_y = registers.get(*input1_y).to_field(); - let input2_x = registers.get(*input2_x).to_field(); - let input2_y = registers.get(*input2_y).to_field(); + let input1_x = memory.read(*input1_x).to_field(); + let input1_y = memory.read(*input1_y).to_field(); + let input2_x = memory.read(*input2_x).to_field(); + let input2_y = memory.read(*input2_y).to_field(); let (x, y) = solver.ec_add(&input1_x, &input1_y, &input2_x, &input2_y)?; - memory.write_slice(registers.get(result.pointer).to_usize(), &[x.into(), y.into()]); + memory.write_slice(memory.read_ref(result.pointer), &[x.into(), y.into()]); Ok(()) } BlackBoxOp::PedersenCommitment { inputs, domain_separator, output } => { let inputs: Vec = - read_heap_vector(memory, registers, inputs).iter().map(|x| x.to_field()).collect(); + read_heap_vector(memory, inputs).iter().map(|x| x.to_field()).collect(); let domain_separator: u32 = - registers.get(*domain_separator).to_u128().try_into().map_err(|_| { + memory.read(*domain_separator).to_u128().try_into().map_err(|_| { BlackBoxResolutionError::Failed( BlackBoxFunc::PedersenCommitment, "Invalid signature length".to_string(), ) })?; let (x, y) = solver.pedersen_commitment(&inputs, domain_separator)?; - memory.write_slice(registers.get(output.pointer).to_usize(), &[x.into(), y.into()]); + memory.write_slice(memory.read_ref(output.pointer), &[x.into(), y.into()]); Ok(()) } BlackBoxOp::PedersenHash { inputs, domain_separator, output } => { let inputs: Vec = - read_heap_vector(memory, registers, inputs).iter().map(|x| x.to_field()).collect(); + read_heap_vector(memory, inputs).iter().map(|x| x.to_field()).collect(); let domain_separator: u32 = - registers.get(*domain_separator).to_u128().try_into().map_err(|_| { + memory.read(*domain_separator).to_u128().try_into().map_err(|_| { BlackBoxResolutionError::Failed( BlackBoxFunc::PedersenCommitment, 
"Invalid signature length".to_string(), ) })?; let hash = solver.pedersen_hash(&inputs, domain_separator)?; - registers.set(*output, hash.into()); + memory.write(*output, hash.into()); Ok(()) } BlackBoxOp::BigIntAdd { .. } => todo!(), - BlackBoxOp::BigIntNeg { .. } => todo!(), + BlackBoxOp::BigIntSub { .. } => todo!(), BlackBoxOp::BigIntMul { .. } => todo!(), BlackBoxOp::BigIntDiv { .. } => todo!(), BlackBoxOp::BigIntFromLeBytes { .. } => todo!(), BlackBoxOp::BigIntToLeBytes { .. } => todo!(), - BlackBoxOp::Poseidon2Permutation { .. } => todo!(), - BlackBoxOp::Sha256Compression { .. } => todo!(), + BlackBoxOp::Poseidon2Permutation { message, output, len } => { + let input = read_heap_vector(memory, message); + let input: Vec = input.iter().map(|x| x.to_field()).collect(); + let len = memory.read(*len).to_u128() as u32; + let result = solver.poseidon2_permutation(&input, len)?; + let mut values = Vec::new(); + for i in result { + values.push(Value::from(i)); + } + memory.write_slice(memory.read_ref(output.pointer), &values); + Ok(()) + } + BlackBoxOp::Sha256Compression { input, hash_values, output } => { + let mut message = [0; 16]; + let inputs = read_heap_vector(memory, input); + if inputs.len() != 16 { + return Err(BlackBoxResolutionError::Failed( + BlackBoxFunc::Sha256Compression, + format!("Expected 16 inputs but encountered {}", &inputs.len()), + )); + } + for (i, input) in inputs.iter().enumerate() { + message[i] = input.to_u128() as u32; + } + let mut state = [0; 8]; + let values = read_heap_vector(memory, hash_values); + if values.len() != 8 { + return Err(BlackBoxResolutionError::Failed( + BlackBoxFunc::Sha256Compression, + format!("Expected 8 values but encountered {}", &values.len()), + )); + } + for (i, value) in values.iter().enumerate() { + state[i] = value.to_u128() as u32; + } + + sha256compression(&mut state, &message); + let state = state.map(|x| Value::from(x as u128)); + + memory.write_slice(memory.read_ref(output.pointer), &state); + Ok(()) + } } } @@ -219,7 +244,7 @@ fn black_box_function_from_op(op: &BlackBoxOp) -> BlackBoxFunc { BlackBoxOp::FixedBaseScalarMul { .. } => BlackBoxFunc::FixedBaseScalarMul, BlackBoxOp::EmbeddedCurveAdd { .. } => BlackBoxFunc::EmbeddedCurveAdd, BlackBoxOp::BigIntAdd { .. } => BlackBoxFunc::BigIntAdd, - BlackBoxOp::BigIntNeg { .. } => BlackBoxFunc::BigIntNeg, + BlackBoxOp::BigIntSub { .. } => BlackBoxFunc::BigIntSub, BlackBoxOp::BigIntMul { .. } => BlackBoxFunc::BigIntMul, BlackBoxOp::BigIntDiv { .. } => BlackBoxFunc::BigIntDiv, BlackBoxOp::BigIntFromLeBytes { .. 
} => BlackBoxFunc::BigIntFromLeBytes, @@ -231,11 +256,11 @@ fn black_box_function_from_op(op: &BlackBoxOp) -> BlackBoxFunc { #[cfg(test)] mod test { - use acir::brillig::BlackBoxOp; + use acir::brillig::{BlackBoxOp, MemoryAddress}; use crate::{ black_box::{evaluate_black_box, to_u8_vec, to_value_vec}, - DummyBlackBoxSolver, HeapArray, HeapVector, Memory, Registers, Value, + DummyBlackBoxSolver, HeapArray, HeapVector, Memory, }; #[test] @@ -243,27 +268,22 @@ mod test { let message: Vec = b"hello world".to_vec(); let message_length = message.len(); - let mut memory = Memory::from(vec![]); - let message_pointer = 0; + let mut memory = Memory::default(); + let message_pointer = 3; let result_pointer = message_pointer + message_length; - memory.write_slice(message_pointer, to_value_vec(&message).as_slice()); - - let mut registers = Registers { - inner: vec![ - Value::from(message_pointer), - Value::from(message_length), - Value::from(result_pointer), - ], - }; + memory.write(MemoryAddress(0), message_pointer.into()); + memory.write(MemoryAddress(1), message_length.into()); + memory.write(MemoryAddress(2), result_pointer.into()); + memory.write_slice(MemoryAddress(message_pointer), to_value_vec(&message).as_slice()); let op = BlackBoxOp::Sha256 { message: HeapVector { pointer: 0.into(), size: 1.into() }, output: HeapArray { pointer: 2.into(), size: 32 }, }; - evaluate_black_box(&op, &DummyBlackBoxSolver, &mut registers, &mut memory).unwrap(); + evaluate_black_box(&op, &DummyBlackBoxSolver, &mut memory).unwrap(); - let result = memory.read_slice(result_pointer, 32); + let result = memory.read_slice(MemoryAddress(result_pointer), 32); assert_eq!( to_u8_vec(result), diff --git a/acvm-repo/brillig_vm/src/lib.rs b/acvm-repo/brillig_vm/src/lib.rs index 0c258ab34d6..c7bf014f068 100644 --- a/acvm-repo/brillig_vm/src/lib.rs +++ b/acvm-repo/brillig_vm/src/lib.rs @@ -12,8 +12,8 @@ //! [acvm]: https://crates.io/crates/acvm use acir::brillig::{ - BinaryFieldOp, BinaryIntOp, ForeignCallParam, ForeignCallResult, HeapArray, HeapVector, Opcode, - RegisterIndex, RegisterOrMemory, Value, + BinaryFieldOp, BinaryIntOp, ForeignCallParam, ForeignCallResult, HeapArray, HeapValueType, + HeapVector, MemoryAddress, Opcode, Value, ValueOrArray, }; use acir::FieldElement; // Re-export `brillig`. @@ -22,7 +22,6 @@ pub use acir::brillig; mod arithmetic; mod black_box; mod memory; -mod registers; use acvm_blackbox_solver::{BlackBoxFunctionSolver, BlackBoxResolutionError}; use arithmetic::{evaluate_binary_bigint_op, evaluate_binary_field_op}; @@ -30,14 +29,16 @@ use black_box::evaluate_black_box; pub use memory::Memory; use num_bigint::BigUint; -pub use registers::Registers; /// The error call stack contains the opcode indexes of the call stack at the time of failure, plus the index of the opcode that failed. pub type ErrorCallStack = Vec; #[derive(Debug, PartialEq, Eq, Clone)] pub enum VMStatus { - Finished, + Finished { + return_data_offset: usize, + return_data_size: usize, + }, InProgress, Failure { message: String, @@ -61,8 +62,8 @@ pub enum VMStatus { #[derive(Debug, PartialEq, Eq, Clone)] /// VM encapsulates the state of the Brillig VM during execution. 
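Since `Finished` now carries the return-data window, callers are expected to slice it out of VM memory rather than read registers. A hypothetical caller-side helper sketching that flow (the helper name is illustrative; it assumes the `brillig_vm` types shown in this diff):

```rust
/// Runs the VM to completion and copies out the Brillig return data
/// advertised by the new `Finished` variant. Hypothetical helper.
fn extract_return_data<B: BlackBoxFunctionSolver>(vm: &mut VM<'_, B>) -> Vec<Value> {
    match vm.process_opcodes() {
        VMStatus::Finished { return_data_offset, return_data_size } => vm.get_memory()
            [return_data_offset..return_data_offset + return_data_size]
            .to_vec(),
        status => panic!("Brillig VM did not finish cleanly: {status:?}"),
    }
}
```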
pub struct VM<'a, B: BlackBoxFunctionSolver> { - /// Register storage - registers: Registers, + /// Calldata to the brillig function + calldata: Vec, /// Instruction pointer program_counter: usize, /// A counter maintained throughout a Brillig process that determines @@ -86,20 +87,19 @@ pub struct VM<'a, B: BlackBoxFunctionSolver> { impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { /// Constructs a new VM instance pub fn new( - inputs: Registers, - memory: Vec, + calldata: Vec, bytecode: &'a [Opcode], foreign_call_results: Vec, black_box_solver: &'a B, ) -> Self { Self { - registers: inputs, + calldata, program_counter: 0, foreign_call_counter: 0, foreign_call_results, bytecode, status: VMStatus::InProgress, - memory: memory.into(), + memory: Memory::default(), call_stack: Vec::new(), black_box_solver, } @@ -117,8 +117,8 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { } /// Sets the current status of the VM to Finished (completed execution). - fn finish(&mut self) -> VMStatus { - self.status(VMStatus::Finished) + fn finish(&mut self, return_data_offset: usize, return_data_size: usize) -> VMStatus { + self.status(VMStatus::Finished { return_data_offset, return_data_size }) } /// Sets the status of the VM to `ForeignCallWait`. @@ -154,26 +154,27 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { pub fn process_opcodes(&mut self) -> VMStatus { while !matches!( self.process_opcode(), - VMStatus::Finished | VMStatus::Failure { .. } | VMStatus::ForeignCallWait { .. } + VMStatus::Finished { .. } | VMStatus::Failure { .. } | VMStatus::ForeignCallWait { .. } ) {} self.status.clone() } - /// Returns all of the registers in the VM. - pub fn get_registers(&self) -> &Registers { - &self.registers - } - - pub fn set_register(&mut self, register_index: RegisterIndex, value: Value) { - self.registers.set(register_index, value); - } - pub fn get_memory(&self) -> &[Value] { self.memory.values() } pub fn write_memory_at(&mut self, ptr: usize, value: Value) { - self.memory.write(ptr, value); + self.memory.write(MemoryAddress(ptr), value); + } + + /// Returns the VM's current call stack, including the actual program + /// counter in the last position of the returned vector. + pub fn get_call_stack(&self) -> Vec { + self.call_stack + .iter() + .map(|program_counter| program_counter.to_usize()) + .chain(std::iter::once(self.program_counter)) + .collect() } /// Process a single opcode and modify the program counter. 
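A usage sketch of the reworked constructor, mirroring the `add_single_step_smoke` test further down in this diff: the VM is now seeded with calldata instead of a register file, and values only land in memory once a `CalldataCopy` opcode runs (written as it would appear inside the crate's test module, since `DummyBlackBoxSolver` is crate-private):

```rust
let calldata = vec![Value::from(27u128)];
let opcodes = [Opcode::CalldataCopy {
    destination_address: MemoryAddress::from(0),
    size: 1,
    offset: 0,
}];
let mut vm = VM::new(calldata, &opcodes, vec![], &DummyBlackBoxSolver);

assert_eq!(
    vm.process_opcodes(),
    VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }
);
// The copied calldata word is now readable from VM memory.
assert_eq!(vm.get_memory()[0], Value::from(27u128));
```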
@@ -192,31 +193,51 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { self.increment_program_counter() } } + Opcode::Cast { destination: destination_address, source: source_address, bit_size } => { + let source_value = self.memory.read(*source_address); + let casted_value = self.cast(*bit_size, source_value); + self.memory.write(*destination_address, casted_value); + self.increment_program_counter() + } Opcode::Jump { location: destination } => self.set_program_counter(*destination), Opcode::JumpIf { condition, location: destination } => { // Check if condition is true // We use 0 to mean false and any other value to mean true - let condition_value = self.registers.get(*condition); + let condition_value = self.memory.read(*condition); if !condition_value.is_zero() { return self.set_program_counter(*destination); } self.increment_program_counter() } Opcode::JumpIfNot { condition, location: destination } => { - let condition_value = self.registers.get(*condition); + let condition_value = self.memory.read(*condition); if condition_value.is_zero() { return self.set_program_counter(*destination); } self.increment_program_counter() } + Opcode::CalldataCopy { destination_address, size, offset } => { + let values = &self.calldata[*offset..(*offset + size)]; + self.memory.write_slice(*destination_address, values); + self.increment_program_counter() + } Opcode::Return => { - if let Some(register) = self.call_stack.pop() { - self.set_program_counter(register.to_usize() + 1) + if let Some(return_location) = self.call_stack.pop() { + self.set_program_counter(return_location.to_usize() + 1) } else { self.fail("return opcode hit, but callstack already empty".to_string()) } } - Opcode::ForeignCall { function, destinations, inputs } => { + Opcode::ForeignCall { + function, + destinations, + destination_value_types, + inputs, + input_value_types, + } => { + assert!(inputs.len() == input_value_types.len()); + assert!(destinations.len() == destination_value_types.len()); + if self.foreign_call_counter >= self.foreign_call_results.len() { // When this opcode is called, it is possible that the results of a foreign call are // not yet known (not enough entries in `foreign_call_results`). @@ -227,7 +248,8 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { // but has the necessary results to proceed with execution. 
let resolved_inputs = inputs .iter() - .map(|input| self.get_register_value_or_memory_values(*input)) + .zip(input_value_types) + .map(|(input, input_type)| self.get_memory_values(*input, input_type)) .collect::>(); return self.wait_for_foreign_call(function.clone(), resolved_inputs); } @@ -235,48 +257,69 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { let values = &self.foreign_call_results[self.foreign_call_counter].values; let mut invalid_foreign_call_result = false; - for (destination, output) in destinations.iter().zip(values) { - match destination { - RegisterOrMemory::RegisterIndex(value_index) => match output { - ForeignCallParam::Single(value) => { - self.registers.set(*value_index, *value); - } - _ => unreachable!( - "Function result size does not match brillig bytecode (expected 1 result)" - ), - }, - RegisterOrMemory::HeapArray(HeapArray { pointer: pointer_index, size }) => { + for ((destination, value_type), output) in + destinations.iter().zip(destination_value_types).zip(values) + { + match (destination, value_type) { + (ValueOrArray::MemoryAddress(value_index), HeapValueType::Simple) => { match output { - ForeignCallParam::Array(values) => { - if values.len() != *size { - invalid_foreign_call_result = true; - break; - } - // Convert the destination pointer to a usize - let destination = self.registers.get(*pointer_index).to_usize(); - // Write to our destination memory - self.memory.write_slice(destination, values); - } - _ => { - unreachable!("Function result size does not match brillig bytecode size") + ForeignCallParam::Single(value) => { + self.memory.write(*value_index, *value); } + _ => unreachable!( + "Function result size does not match brillig bytecode. Expected 1 result but got {output:?}" + ), } } - RegisterOrMemory::HeapVector(HeapVector { pointer: pointer_index, size: size_index }) => { - match output { - ForeignCallParam::Array(values) => { - // Set our size in the size register - self.registers.set(*size_index, Value::from(values.len())); - // Convert the destination pointer to a usize - let destination = self.registers.get(*pointer_index).to_usize(); - // Write to our destination memory - self.memory.write_slice(destination, values); + ( + ValueOrArray::HeapArray(HeapArray { pointer: pointer_index, size }), + HeapValueType::Array { value_types, size: type_size }, + ) if size == type_size => { + if HeapValueType::all_simple(value_types) { + match output { + ForeignCallParam::Array(values) => { + if values.len() != *size { + invalid_foreign_call_result = true; + break; + } + // Convert the destination pointer to a usize + let destination = self.memory.read_ref(*pointer_index); + // Write to our destination memory + self.memory.write_slice(destination, values); + } + _ => { + unreachable!("Function result size does not match brillig bytecode size") + } } - _ => { - unreachable!("Function result size does not match brillig bytecode size") + } else { + unimplemented!("deflattening heap arrays from foreign calls"); + } + } + ( + ValueOrArray::HeapVector(HeapVector {pointer: pointer_index, size: size_index }), + HeapValueType::Vector { value_types }, + ) => { + if HeapValueType::all_simple(value_types) { + match output { + ForeignCallParam::Array(values) => { + // Set our size in the size address + self.memory.write(*size_index, Value::from(values.len())); + // Convert the destination pointer to a usize + let destination = self.memory.read_ref(*pointer_index); + // Write to our destination memory + self.memory.write_slice(destination, values); + } + _ => { + 
unreachable!("Function result size does not match brillig bytecode size") + } } + } else { + unimplemented!("deflattening heap vectors from foreign calls"); } } + _ => { + unreachable!("Unexpected value type {value_type:?} for destination {destination:?}"); + } } } @@ -291,26 +334,28 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { self.foreign_call_counter += 1; self.increment_program_counter() } - Opcode::Mov { destination: destination_register, source: source_register } => { - let source_value = self.registers.get(*source_register); - self.registers.set(*destination_register, source_value); + Opcode::Mov { destination: destination_address, source: source_address } => { + let source_value = self.memory.read(*source_address); + self.memory.write(*destination_address, source_value); self.increment_program_counter() } Opcode::Trap => self.fail("explicit trap hit in brillig".to_string()), - Opcode::Stop => self.finish(), - Opcode::Load { destination: destination_register, source_pointer } => { - // Convert our source_pointer to a usize - let source = self.registers.get(*source_pointer); + Opcode::Stop { return_data_offset, return_data_size } => { + self.finish(*return_data_offset, *return_data_size) + } + Opcode::Load { destination: destination_address, source_pointer } => { + // Convert our source_pointer to an address + let source = self.memory.read_ref(*source_pointer); // Use our usize source index to lookup the value in memory - let value = &self.memory.read(source.to_usize()); - self.registers.set(*destination_register, *value); + let value = &self.memory.read(source); + self.memory.write(*destination_address, *value); self.increment_program_counter() } - Opcode::Store { destination_pointer, source: source_register } => { - // Convert our destination_pointer to a usize - let destination = self.registers.get(*destination_pointer).to_usize(); + Opcode::Store { destination_pointer, source: source_address } => { + // Convert our destination_pointer to an address + let destination = self.memory.read_ref(*destination_pointer); // Use our usize destination index to set the value in memory - self.memory.write(destination, self.registers.get(*source_register)); + self.memory.write(destination, self.memory.read(*source_address)); self.increment_program_counter() } Opcode::Call { location } => { @@ -318,17 +363,12 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { self.call_stack.push(Value::from(self.program_counter)); self.set_program_counter(*location) } - Opcode::Const { destination, value } => { - self.registers.set(*destination, *value); + Opcode::Const { destination, value, bit_size: _ } => { + self.memory.write(*destination, *value); self.increment_program_counter() } Opcode::BlackBox(black_box_op) => { - match evaluate_black_box( - black_box_op, - self.black_box_solver, - &mut self.registers, - &mut self.memory, - ) { + match evaluate_black_box(black_box_op, self.black_box_solver, &mut self.memory) { Ok(()) => self.increment_program_counter(), Err(e) => self.fail(e.to_string()), } @@ -353,26 +393,87 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { assert!(self.program_counter < self.bytecode.len()); self.program_counter = value; if self.program_counter >= self.bytecode.len() { - self.status = VMStatus::Finished; + self.status = VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }; } self.status.clone() } - fn get_register_value_or_memory_values(&self, input: RegisterOrMemory) -> ForeignCallParam { - match input { - RegisterOrMemory::RegisterIndex(value_index) => 
self.registers.get(value_index).into(), - RegisterOrMemory::HeapArray(HeapArray { pointer: pointer_index, size }) => { - let start = self.registers.get(pointer_index); - self.memory.read_slice(start.to_usize(), size).to_vec().into() + fn get_memory_values( + &self, + input: ValueOrArray, + value_type: &HeapValueType, + ) -> ForeignCallParam { + match (input, value_type) { + (ValueOrArray::MemoryAddress(value_index), HeapValueType::Simple) => { + self.memory.read(value_index).into() + } + ( + ValueOrArray::HeapArray(HeapArray { pointer: pointer_index, size }), + HeapValueType::Array { value_types, size: type_size }, + ) if *type_size == size => { + let start = self.memory.read_ref(pointer_index); + self.read_slice_of_values_from_memory(start, size, value_types).into() } - RegisterOrMemory::HeapVector(HeapVector { - pointer: pointer_index, - size: size_index, - }) => { - let start = self.registers.get(pointer_index); - let size = self.registers.get(size_index); - self.memory.read_slice(start.to_usize(), size.to_usize()).to_vec().into() + ( + ValueOrArray::HeapVector(HeapVector { pointer: pointer_index, size: size_index }), + HeapValueType::Vector { value_types }, + ) => { + let start = self.memory.read_ref(pointer_index); + let size = self.memory.read(size_index).to_usize(); + self.read_slice_of_values_from_memory(start, size, value_types).into() } + _ => { + unreachable!("Unexpected value type {value_type:?} for input {input:?}"); + } + } + } + + /// Reads an array/vector from memory but recursively reads pointers to + /// nested arrays/vectors according to the sequence of value types. + fn read_slice_of_values_from_memory( + &self, + start: MemoryAddress, + size: usize, + value_types: &[HeapValueType], + ) -> Vec { + if HeapValueType::all_simple(value_types) { + self.memory.read_slice(start, size).to_vec() + } else { + // Check that the sequence of value types fit an integer number of + // times inside the given size. 
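The cycling of `value_types` over the `size` cells, together with the divisibility check asserted just below, can be restated generically. A small illustrative helper (not part of the diff):

```rust
/// Yields the type describing each of the `size` cells of a heap array or
/// vector: value_types[0], value_types[1], ..., repeating. Panics if the
/// sequence of types does not fit a whole number of times, mirroring the
/// assertion in `read_slice_of_values_from_memory`.
fn cell_types<'a, T>(value_types: &'a [T], size: usize) -> impl Iterator<Item = &'a T> + 'a {
    assert!(
        size % value_types.len() == 0,
        "array/vector does not contain a whole number of elements"
    );
    value_types.iter().cycle().take(size)
}
```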
+ assert!( + 0 == size % value_types.len(), + "array/vector does not contain a whole number of elements" + ); + (0..size) + .zip(value_types.iter().cycle()) + .flat_map(|(i, value_type)| { + let value_address: MemoryAddress = (start.to_usize() + i).into(); + match value_type { + HeapValueType::Simple => { + let value = self.memory.read(value_address); + vec![value] + } + HeapValueType::Array { value_types, size } => { + let array_address = self.memory.read_ref(value_address); + let array_start = self.memory.read_ref(array_address); + self.read_slice_of_values_from_memory(array_start, *size, value_types) + } + HeapValueType::Vector { value_types } => { + let vector_address = self.memory.read_ref(value_address); + let vector_start = self.memory.read_ref(vector_address); + let size_address: MemoryAddress = + (vector_address.to_usize() + 1).into(); + let vector_size = self.memory.read(size_address).to_usize(); + self.read_slice_of_values_from_memory( + vector_start, + vector_size, + value_types, + ) + } + } + }) + .collect::>() } } @@ -381,17 +482,17 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { fn process_binary_field_op( &mut self, op: BinaryFieldOp, - lhs: RegisterIndex, - rhs: RegisterIndex, - result: RegisterIndex, + lhs: MemoryAddress, + rhs: MemoryAddress, + result: MemoryAddress, ) { - let lhs_value = self.registers.get(lhs); - let rhs_value = self.registers.get(rhs); + let lhs_value = self.memory.read(lhs); + let rhs_value = self.memory.read(rhs); let result_value = evaluate_binary_field_op(&op, lhs_value.to_field(), rhs_value.to_field()); - self.registers.set(result, result_value.into()); + self.memory.write(result, result_value.into()); } /// Process a binary operation. @@ -400,22 +501,29 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { &mut self, op: BinaryIntOp, bit_size: u32, - lhs: RegisterIndex, - rhs: RegisterIndex, - result: RegisterIndex, + lhs: MemoryAddress, + rhs: MemoryAddress, + result: MemoryAddress, ) -> Result<(), String> { - let lhs_value = self.registers.get(lhs); - let rhs_value = self.registers.get(rhs); + let lhs_value = self.memory.read(lhs); + let rhs_value = self.memory.read(rhs); // Convert to big integers let lhs_big = BigUint::from_bytes_be(&lhs_value.to_field().to_be_bytes()); let rhs_big = BigUint::from_bytes_be(&rhs_value.to_field().to_be_bytes()); let result_value = evaluate_binary_bigint_op(&op, lhs_big, rhs_big, bit_size)?; // Convert back to field element - self.registers - .set(result, FieldElement::from_be_bytes_reduce(&result_value.to_bytes_be()).into()); + self.memory + .write(result, FieldElement::from_be_bytes_reduce(&result_value.to_bytes_be()).into()); Ok(()) } + + /// Casts a value to a different bit size. 
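The `cast` helper defined just after this reduces a value modulo 2^bit_size by masking. A standalone restatement of that arithmetic (mirroring the `cast_opcode` test later in this diff, which casts 2^32 - 1 down to 8 bits and reads back 255):

```rust
use num_bigint::BigUint;

/// Truncates `value` to `bit_size` bits by masking with 2^bit_size - 1,
/// e.g. cast_to_bit_size(u32::MAX.into(), 8) == 255.
fn cast_to_bit_size(value: BigUint, bit_size: u32) -> BigUint {
    let mask = (BigUint::from(1u32) << bit_size) - 1u32;
    value & mask
}
```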
+ fn cast(&self, bit_size: u32, value: Value) -> Value { + let lhs_big = BigUint::from_bytes_be(&value.to_field().to_be_bytes()); + let mask = BigUint::from(2_u32).pow(bit_size) - 1_u32; + FieldElement::from_be_bytes_reduce(&(lhs_big & mask).to_bytes_be()).into() + } } pub(crate) struct DummyBlackBoxSolver; @@ -460,6 +568,13 @@ impl BlackBoxFunctionSolver for DummyBlackBoxSolver { ) -> Result<(FieldElement, FieldElement), BlackBoxResolutionError> { Ok((5_u128.into(), 6_u128.into())) } + fn poseidon2_permutation( + &self, + _input: &[FieldElement], + len: u32, + ) -> Result, BlackBoxResolutionError> { + Ok(vec![0_u128.into(); len as usize]) + } } #[cfg(test)] @@ -468,111 +583,123 @@ mod tests { #[test] fn add_single_step_smoke() { - // Load values into registers and initialize the registers that - // will be used during bytecode processing - let input_registers = - Registers::load(vec![Value::from(1u128), Value::from(2u128), Value::from(0u128)]); - - // Add opcode to add the value in register `0` and `1` - // and place the output in register `2` - let opcode = Opcode::BinaryIntOp { - op: BinaryIntOp::Add, - bit_size: 2, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(2), + let calldata = vec![Value::from(27u128)]; + + // Add opcode to add the value in address `0` and `1` + // and place the output in address `2` + let calldata_copy = Opcode::CalldataCopy { + destination_address: MemoryAddress::from(0), + size: 1, + offset: 0, }; // Start VM - let opcodes = [opcode]; - let mut vm = VM::new(input_registers, vec![], &opcodes, vec![], &DummyBlackBoxSolver); + let opcodes = [calldata_copy]; + let mut vm = VM::new(calldata, &opcodes, vec![], &DummyBlackBoxSolver); // Process a single VM opcode // // After processing a single opcode, we should have // the vm status as finished since there is only one opcode let status = vm.process_opcode(); - assert_eq!(status, VMStatus::Finished); + assert_eq!(status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); - // The register at index `2` should have the value of 3 since we had an + // The address at index `2` should have the value of 3 since we had an // add opcode - let VM { registers, .. } = vm; - let output_value = registers.get(RegisterIndex::from(2)); + let VM { memory, .. 
} = vm; + let output_value = memory.read(MemoryAddress::from(0)); - assert_eq!(output_value, Value::from(3u128)); + assert_eq!(output_value, Value::from(27u128)); } #[test] fn jmpif_opcode() { - let mut registers = vec![]; + let mut calldata = vec![]; let mut opcodes = vec![]; let lhs = { - registers.push(Value::from(2u128)); - RegisterIndex::from(registers.len() - 1) + calldata.push(Value::from(2u128)); + MemoryAddress::from(calldata.len() - 1) }; let rhs = { - registers.push(Value::from(2u128)); - RegisterIndex::from(registers.len() - 1) + calldata.push(Value::from(2u128)); + MemoryAddress::from(calldata.len() - 1) }; - let destination = { - registers.push(Value::from(0u128)); - RegisterIndex::from(registers.len() - 1) - }; + let destination = MemoryAddress::from(calldata.len()); + opcodes.push(Opcode::CalldataCopy { + destination_address: MemoryAddress::from(0), + size: 2, + offset: 0, + }); let equal_cmp_opcode = Opcode::BinaryIntOp { op: BinaryIntOp::Equals, bit_size: 1, lhs, rhs, destination }; opcodes.push(equal_cmp_opcode); - opcodes.push(Opcode::Jump { location: 2 }); - opcodes.push(Opcode::JumpIf { condition: RegisterIndex::from(2), location: 3 }); + opcodes.push(Opcode::Jump { location: 3 }); + opcodes.push(Opcode::JumpIf { condition: MemoryAddress::from(2), location: 4 }); - let mut vm = - VM::new(Registers::load(registers), vec![], &opcodes, vec![], &DummyBlackBoxSolver); + let mut vm = VM::new(calldata, &opcodes, vec![], &DummyBlackBoxSolver); + + let status = vm.process_opcode(); + assert_eq!(status, VMStatus::InProgress); let status = vm.process_opcode(); assert_eq!(status, VMStatus::InProgress); - let output_cmp_value = vm.registers.get(RegisterIndex::from(2)); + let output_cmp_value = vm.memory.read(MemoryAddress::from(2)); assert_eq!(output_cmp_value, Value::from(true)); let status = vm.process_opcode(); assert_eq!(status, VMStatus::InProgress); let status = vm.process_opcode(); - assert_eq!(status, VMStatus::Finished); + assert_eq!(status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); } #[test] fn jmpifnot_opcode() { - let input_registers = - Registers::load(vec![Value::from(1u128), Value::from(2u128), Value::from(0u128)]); + let calldata = vec![Value::from(1u128), Value::from(2u128)]; + + let calldata_copy = Opcode::CalldataCopy { + destination_address: MemoryAddress::from(0), + size: 2, + offset: 0, + }; + + let jump_opcode = Opcode::Jump { location: 3 }; let trap_opcode = Opcode::Trap; let not_equal_cmp_opcode = Opcode::BinaryFieldOp { op: BinaryFieldOp::Equals, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(2), + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(2), }; - let jump_opcode = Opcode::Jump { location: 2 }; - let jump_if_not_opcode = - Opcode::JumpIfNot { condition: RegisterIndex::from(2), location: 1 }; + Opcode::JumpIfNot { condition: MemoryAddress::from(2), location: 2 }; let add_opcode = Opcode::BinaryFieldOp { op: BinaryFieldOp::Add, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(2), + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(2), }; - let opcodes = - [jump_opcode, trap_opcode, not_equal_cmp_opcode, jump_if_not_opcode, add_opcode]; - let mut vm = VM::new(input_registers, vec![], &opcodes, vec![], &DummyBlackBoxSolver); + let opcodes = [ + calldata_copy, + jump_opcode, + trap_opcode, + not_equal_cmp_opcode, + jump_if_not_opcode, + 
add_opcode, + ]; + let mut vm = VM::new(calldata, &opcodes, vec![], &DummyBlackBoxSolver); + let status = vm.process_opcode(); + assert_eq!(status, VMStatus::InProgress); let status = vm.process_opcode(); assert_eq!(status, VMStatus::InProgress); @@ -580,7 +707,7 @@ mod tests { let status = vm.process_opcode(); assert_eq!(status, VMStatus::InProgress); - let output_cmp_value = vm.registers.get(RegisterIndex::from(2)); + let output_cmp_value = vm.memory.read(MemoryAddress::from(2)); assert_eq!(output_cmp_value, Value::from(false)); let status = vm.process_opcode(); @@ -591,107 +718,164 @@ mod tests { status, VMStatus::Failure { message: "explicit trap hit in brillig".to_string(), - call_stack: vec![1] + call_stack: vec![2] } ); - // The register at index `2` should have not changed as we jumped over the add opcode - let VM { registers, .. } = vm; - let output_value = registers.get(RegisterIndex::from(2)); + // The address at index `2` should have not changed as we jumped over the add opcode + let VM { memory, .. } = vm; + let output_value = memory.read(MemoryAddress::from(2)); assert_eq!(output_value, Value::from(false)); } + #[test] + fn cast_opcode() { + let calldata = vec![Value::from((2_u128.pow(32)) - 1)]; + + let opcodes = &[ + Opcode::CalldataCopy { + destination_address: MemoryAddress::from(0), + size: 1, + offset: 0, + }, + Opcode::Cast { + destination: MemoryAddress::from(1), + source: MemoryAddress::from(0), + bit_size: 8, + }, + Opcode::Stop { return_data_offset: 1, return_data_size: 1 }, + ]; + let mut vm = VM::new(calldata, opcodes, vec![], &DummyBlackBoxSolver); + + let status = vm.process_opcode(); + assert_eq!(status, VMStatus::InProgress); + + let status = vm.process_opcode(); + assert_eq!(status, VMStatus::InProgress); + + let status = vm.process_opcode(); + assert_eq!(status, VMStatus::Finished { return_data_offset: 1, return_data_size: 1 }); + + let VM { memory, .. } = vm; + + let casted_value = memory.read(MemoryAddress::from(1)); + assert_eq!(casted_value, Value::from(2_u128.pow(8) - 1)); + } + #[test] fn mov_opcode() { - let input_registers = - Registers::load(vec![Value::from(1u128), Value::from(2u128), Value::from(3u128)]); + let calldata = vec![Value::from(1u128), Value::from(2u128), Value::from(3u128)]; + + let calldata_copy = Opcode::CalldataCopy { + destination_address: MemoryAddress::from(0), + size: 3, + offset: 0, + }; let mov_opcode = - Opcode::Mov { destination: RegisterIndex::from(2), source: RegisterIndex::from(0) }; + Opcode::Mov { destination: MemoryAddress::from(2), source: MemoryAddress::from(0) }; - let opcodes = &[mov_opcode]; - let mut vm = VM::new(input_registers, vec![], opcodes, vec![], &DummyBlackBoxSolver); + let opcodes = &[calldata_copy, mov_opcode]; + let mut vm = VM::new(calldata, opcodes, vec![], &DummyBlackBoxSolver); let status = vm.process_opcode(); - assert_eq!(status, VMStatus::Finished); + assert_eq!(status, VMStatus::InProgress); - let VM { registers, .. } = vm; + let status = vm.process_opcode(); + assert_eq!(status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); - let destination_value = registers.get(RegisterIndex::from(2)); + let VM { memory, .. 
} = vm; + + let destination_value = memory.read(MemoryAddress::from(2)); assert_eq!(destination_value, Value::from(1u128)); - let source_value = registers.get(RegisterIndex::from(0)); + let source_value = memory.read(MemoryAddress::from(0)); assert_eq!(source_value, Value::from(1u128)); } #[test] fn cmp_binary_ops() { let bit_size = 32; - let input_registers = Registers::load(vec![ + let calldata = vec![ Value::from(2u128), Value::from(2u128), Value::from(0u128), Value::from(5u128), Value::from(6u128), - ]); + ]; + + let calldata_copy = Opcode::CalldataCopy { + destination_address: MemoryAddress::from(0), + size: 5, + offset: 0, + }; let equal_opcode = Opcode::BinaryIntOp { bit_size, op: BinaryIntOp::Equals, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(2), + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(2), }; let not_equal_opcode = Opcode::BinaryIntOp { bit_size, op: BinaryIntOp::Equals, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(3), - destination: RegisterIndex::from(2), + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(3), + destination: MemoryAddress::from(2), }; let less_than_opcode = Opcode::BinaryIntOp { bit_size, op: BinaryIntOp::LessThan, - lhs: RegisterIndex::from(3), - rhs: RegisterIndex::from(4), - destination: RegisterIndex::from(2), + lhs: MemoryAddress::from(3), + rhs: MemoryAddress::from(4), + destination: MemoryAddress::from(2), }; let less_than_equal_opcode = Opcode::BinaryIntOp { bit_size, op: BinaryIntOp::LessThanEquals, - lhs: RegisterIndex::from(3), - rhs: RegisterIndex::from(4), - destination: RegisterIndex::from(2), + lhs: MemoryAddress::from(3), + rhs: MemoryAddress::from(4), + destination: MemoryAddress::from(2), }; - let opcodes = [equal_opcode, not_equal_opcode, less_than_opcode, less_than_equal_opcode]; - let mut vm = VM::new(input_registers, vec![], &opcodes, vec![], &DummyBlackBoxSolver); + let opcodes = [ + calldata_copy, + equal_opcode, + not_equal_opcode, + less_than_opcode, + less_than_equal_opcode, + ]; + let mut vm = VM::new(calldata, &opcodes, vec![], &DummyBlackBoxSolver); let status = vm.process_opcode(); assert_eq!(status, VMStatus::InProgress); - let output_eq_value = vm.registers.get(RegisterIndex::from(2)); + let status = vm.process_opcode(); + assert_eq!(status, VMStatus::InProgress); + + let output_eq_value = vm.memory.read(MemoryAddress::from(2)); assert_eq!(output_eq_value, Value::from(true)); let status = vm.process_opcode(); assert_eq!(status, VMStatus::InProgress); - let output_neq_value = vm.registers.get(RegisterIndex::from(2)); + let output_neq_value = vm.memory.read(MemoryAddress::from(2)); assert_eq!(output_neq_value, Value::from(false)); let status = vm.process_opcode(); assert_eq!(status, VMStatus::InProgress); - let lt_value = vm.registers.get(RegisterIndex::from(2)); + let lt_value = vm.memory.read(MemoryAddress::from(2)); assert_eq!(lt_value, Value::from(true)); let status = vm.process_opcode(); - assert_eq!(status, VMStatus::Finished); + assert_eq!(status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); - let lte_value = vm.registers.get(RegisterIndex::from(2)); + let lte_value = vm.memory.read(MemoryAddress::from(2)); assert_eq!(lte_value, Value::from(true)); } #[test] @@ -703,22 +887,30 @@ mod tests { /// memory[i] = i as Value; /// i += 1; /// } - fn brillig_write_memory(memory: Vec) -> Vec { + fn brillig_write_memory(item_count: usize) -> Vec { let bit_size = 32; - let r_i = 
RegisterIndex::from(0); - let r_len = RegisterIndex::from(1); - let r_tmp = RegisterIndex::from(2); + let r_i = MemoryAddress::from(0); + let r_len = MemoryAddress::from(1); + let r_tmp = MemoryAddress::from(2); + let r_pointer = MemoryAddress::from(3); + let start = [ // i = 0 - Opcode::Const { destination: r_i, value: 0u128.into() }, + Opcode::Const { destination: r_i, value: 0u128.into(), bit_size: 32 }, // len = memory.len() (approximation) - Opcode::Const { destination: r_len, value: Value::from(memory.len() as u128) }, + Opcode::Const { + destination: r_len, + value: Value::from(item_count as u128), + bit_size: 32, + }, + // pointer = free_memory_ptr + Opcode::Const { destination: r_pointer, value: 4u128.into(), bit_size: 32 }, ]; let loop_body = [ // *i = i - Opcode::Store { destination_pointer: r_i, source: r_i }, + Opcode::Store { destination_pointer: r_pointer, source: r_i }, // tmp = 1 - Opcode::Const { destination: r_tmp, value: 1u128.into() }, + Opcode::Const { destination: r_tmp, value: 1u128.into(), bit_size: 32 }, // i = i + 1 (tmp) Opcode::BinaryIntOp { destination: r_i, @@ -727,6 +919,14 @@ mod tests { rhs: r_tmp, bit_size, }, + // pointer = pointer + 1 + Opcode::BinaryIntOp { + destination: r_pointer, + lhs: r_pointer, + op: BinaryIntOp::Add, + rhs: r_tmp, + bit_size, + }, // tmp = i < len Opcode::BinaryIntOp { destination: r_tmp, @@ -740,11 +940,11 @@ mod tests { ]; let opcodes = [&start[..], &loop_body[..]].concat(); - let vm = brillig_execute_and_get_vm(memory, &opcodes); - vm.get_memory().to_vec() + let vm = brillig_execute_and_get_vm(vec![], &opcodes); + vm.get_memory()[4..].to_vec() } - let memory = brillig_write_memory(vec![Value::from(0u128); 5]); + let memory = brillig_write_memory(5); let expected = vec![ Value::from(0u128), Value::from(1u128), @@ -754,7 +954,7 @@ mod tests { ]; assert_eq!(memory, expected); - let memory = brillig_write_memory(vec![Value::from(0u128); 1024]); + let memory = brillig_write_memory(1024); let expected: Vec = (0..1024).map(|i| Value::from(i as u128)).collect(); assert_eq!(memory, expected); } @@ -771,21 +971,34 @@ mod tests { /// } fn brillig_sum_memory(memory: Vec) -> Value { let bit_size = 32; - let r_i = RegisterIndex::from(0); - let r_len = RegisterIndex::from(1); - let r_sum = RegisterIndex::from(2); - let r_tmp = RegisterIndex::from(3); + let r_i = MemoryAddress::from(0); + let r_len = MemoryAddress::from(1); + let r_sum = MemoryAddress::from(2); + let r_tmp = MemoryAddress::from(3); + let r_pointer = MemoryAddress::from(4); + let start = [ // sum = 0 - Opcode::Const { destination: r_sum, value: 0u128.into() }, + Opcode::Const { destination: r_sum, value: 0u128.into(), bit_size: 32 }, // i = 0 - Opcode::Const { destination: r_i, value: 0u128.into() }, + Opcode::Const { destination: r_i, value: 0u128.into(), bit_size: 32 }, // len = array.len() (approximation) - Opcode::Const { destination: r_len, value: Value::from(memory.len() as u128) }, + Opcode::Const { + destination: r_len, + value: Value::from(memory.len() as u128), + bit_size: 32, + }, + // pointer = array_ptr + Opcode::Const { destination: r_pointer, value: 5u128.into(), bit_size: 32 }, + Opcode::CalldataCopy { + destination_address: MemoryAddress(5), + size: memory.len(), + offset: 0, + }, ]; let loop_body = [ // tmp = *i - Opcode::Load { destination: r_tmp, source_pointer: r_i }, + Opcode::Load { destination: r_tmp, source_pointer: r_pointer }, // sum = sum + tmp Opcode::BinaryIntOp { destination: r_sum, @@ -795,7 +1008,7 @@ mod tests { bit_size, }, // tmp = 1 - 
Opcode::Const { destination: r_tmp, value: 1u128.into() }, + Opcode::Const { destination: r_tmp, value: 1u128.into(), bit_size: 32 }, // i = i + 1 (tmp) Opcode::BinaryIntOp { destination: r_i, @@ -804,6 +1017,14 @@ mod tests { rhs: r_tmp, bit_size, }, + // pointer = pointer + 1 + Opcode::BinaryIntOp { + destination: r_pointer, + lhs: r_pointer, + op: BinaryIntOp::Add, + rhs: r_tmp, + bit_size, + }, // tmp = i < len Opcode::BinaryIntOp { destination: r_tmp, @@ -818,7 +1039,7 @@ mod tests { let opcodes = [&start[..], &loop_body[..]].concat(); let vm = brillig_execute_and_get_vm(memory, &opcodes); - vm.registers.get(r_sum) + vm.memory.read(r_sum) } assert_eq!( @@ -844,21 +1065,24 @@ mod tests { /// memory[i as usize] = i as Value; /// recursive_write(memory, i + 1, len); /// } - /// Note we represent a 100% in-register optimized form in brillig - fn brillig_recursive_write_memory(memory: Vec) -> Vec { + /// Note we represent a 100% in-stack optimized form in brillig + fn brillig_recursive_write_memory(size: usize) -> Vec { let bit_size = 32; - let r_i = RegisterIndex::from(0); - let r_len = RegisterIndex::from(1); - let r_tmp = RegisterIndex::from(2); + let r_i = MemoryAddress::from(0); + let r_len = MemoryAddress::from(1); + let r_tmp = MemoryAddress::from(2); + let r_pointer = MemoryAddress::from(3); let start = [ // i = 0 - Opcode::Const { destination: r_i, value: 0u128.into() }, - // len = memory.len() (approximation) - Opcode::Const { destination: r_len, value: Value::from(memory.len() as u128) }, + Opcode::Const { destination: r_i, value: 0u128.into(), bit_size: 32 }, + // len = size + Opcode::Const { destination: r_len, value: size.into(), bit_size: 32 }, + // pointer = free_memory_ptr + Opcode::Const { destination: r_pointer, value: 4u128.into(), bit_size: 32 }, // call recursive_fn Opcode::Call { - location: 4, // Call after 'start' + location: 5, // Call after 'start' }, // end program by jumping to end Opcode::Jump { location: 100 }, @@ -876,12 +1100,12 @@ mod tests { // if !tmp, goto end Opcode::JumpIf { condition: r_tmp, - location: start.len() + 6, // 7 ops in recursive_fn, go to 'Return' + location: start.len() + 7, // 8 ops in recursive_fn, go to 'Return' }, // *i = i - Opcode::Store { destination_pointer: r_i, source: r_i }, + Opcode::Store { destination_pointer: r_pointer, source: r_i }, // tmp = 1 - Opcode::Const { destination: r_tmp, value: 1u128.into() }, + Opcode::Const { destination: r_tmp, value: 1u128.into(), bit_size: 32 }, // i = i + 1 (tmp) Opcode::BinaryIntOp { destination: r_i, @@ -890,17 +1114,25 @@ mod tests { rhs: r_tmp, bit_size, }, + // pointer = pointer + 1 + Opcode::BinaryIntOp { + destination: r_pointer, + lhs: r_pointer, + op: BinaryIntOp::Add, + rhs: r_tmp, + bit_size, + }, // call recursive_fn Opcode::Call { location: start.len() }, Opcode::Return {}, ]; let opcodes = [&start[..], &recursive_fn[..]].concat(); - let vm = brillig_execute_and_get_vm(memory, &opcodes); - vm.get_memory().to_vec() + let vm = brillig_execute_and_get_vm(vec![], &opcodes); + vm.get_memory()[4..].to_vec() } - let memory = brillig_recursive_write_memory(vec![Value::from(0u128); 5]); + let memory = brillig_recursive_write_memory(5); let expected = vec![ Value::from(0u128), Value::from(1u128), @@ -910,20 +1142,17 @@ mod tests { ]; assert_eq!(memory, expected); - let memory = brillig_recursive_write_memory(vec![Value::from(0u128); 1024]); + let memory = brillig_recursive_write_memory(1024); let expected: Vec = (0..1024).map(|i| Value::from(i as u128)).collect(); assert_eq!(memory, 
expected); } - fn empty_registers() -> Registers { - Registers::load(vec![Value::from(0u128); 16]) - } /// Helper to execute brillig code fn brillig_execute_and_get_vm( - memory: Vec, + calldata: Vec, opcodes: &[Opcode], ) -> VM<'_, DummyBlackBoxSolver> { - let mut vm = VM::new(empty_registers(), memory, opcodes, vec![], &DummyBlackBoxSolver); + let mut vm = VM::new(calldata, opcodes, vec![], &DummyBlackBoxSolver); brillig_execute(&mut vm); assert_eq!(vm.call_stack, vec![]); vm @@ -932,7 +1161,7 @@ mod tests { fn brillig_execute(vm: &mut VM) { loop { let status = vm.process_opcode(); - if matches!(status, VMStatus::Finished | VMStatus::ForeignCallWait { .. }) { + if matches!(status, VMStatus::Finished { .. } | VMStatus::ForeignCallWait { .. }) { break; } assert_eq!(status, VMStatus::InProgress); @@ -940,18 +1169,20 @@ mod tests { } #[test] - fn foreign_call_opcode_register_result() { - let r_input = RegisterIndex::from(0); - let r_result = RegisterIndex::from(1); + fn foreign_call_opcode_simple_result() { + let r_input = MemoryAddress::from(0); + let r_result = MemoryAddress::from(1); let double_program = vec![ - // Load input register with value 5 - Opcode::Const { destination: r_input, value: Value::from(5u128) }, - // Call foreign function "double" with the input register + // Load input address with value 5 + Opcode::Const { destination: r_input, value: Value::from(5u128), bit_size: 32 }, + // Call foreign function "double" with the input address Opcode::ForeignCall { function: "double".into(), - destinations: vec![RegisterOrMemory::RegisterIndex(r_result)], - inputs: vec![RegisterOrMemory::RegisterIndex(r_input)], + destinations: vec![ValueOrArray::MemoryAddress(r_result)], + destination_value_types: vec![HeapValueType::Simple], + inputs: vec![ValueOrArray::MemoryAddress(r_input)], + input_value_types: vec![HeapValueType::Simple], }, ]; @@ -975,19 +1206,20 @@ mod tests { brillig_execute(&mut vm); // Check that VM finished once resumed - assert_eq!(vm.status, VMStatus::Finished); + assert_eq!(vm.status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); - // Check result register - let result_value = vm.registers.get(r_result); + // Check result address + let result_value = vm.memory.read(r_result); assert_eq!(result_value, Value::from(10u128)); // Ensure the foreign call counter has been incremented assert_eq!(vm.foreign_call_counter, 1); } + #[test] fn foreign_call_opcode_memory_result() { - let r_input = RegisterIndex::from(0); - let r_output = RegisterIndex::from(1); + let r_input = MemoryAddress::from(0); + let r_output = MemoryAddress::from(1); // Define a simple 2x2 matrix in memory let initial_matrix = @@ -998,21 +1230,34 @@ mod tests { vec![Value::from(1u128), Value::from(3u128), Value::from(2u128), Value::from(4u128)]; let invert_program = vec![ + Opcode::CalldataCopy { + destination_address: MemoryAddress::from(2), + size: initial_matrix.len(), + offset: 0, + }, // input = 0 - Opcode::Const { destination: r_input, value: Value::from(0u128) }, + Opcode::Const { destination: r_input, value: 2_usize.into(), bit_size: 32 }, // output = 0 - Opcode::Const { destination: r_output, value: Value::from(0u128) }, + Opcode::Const { destination: r_output, value: 2_usize.into(), bit_size: 32 }, // *output = matrix_2x2_transpose(*input) Opcode::ForeignCall { function: "matrix_2x2_transpose".into(), - destinations: vec![RegisterOrMemory::HeapArray(HeapArray { + destinations: vec![ValueOrArray::HeapArray(HeapArray { pointer: r_output, size: initial_matrix.len(), })], - 
inputs: vec![RegisterOrMemory::HeapArray(HeapArray { + destination_value_types: vec![HeapValueType::Array { + size: initial_matrix.len(), + value_types: vec![HeapValueType::Simple], + }], + inputs: vec![ValueOrArray::HeapArray(HeapArray { pointer: r_input, size: initial_matrix.len(), })], + input_value_types: vec![HeapValueType::Array { + value_types: vec![HeapValueType::Simple], + size: initial_matrix.len(), + }], }, ]; @@ -1034,10 +1279,10 @@ mod tests { brillig_execute(&mut vm); // Check that VM finished once resumed - assert_eq!(vm.status, VMStatus::Finished); + assert_eq!(vm.status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); // Check result in memory - let result_values = vm.memory.read_slice(0, 4).to_vec(); + let result_values = vm.memory.read_slice(MemoryAddress(2), 4).to_vec(); assert_eq!(result_values, expected_result); // Ensure the foreign call counter has been incremented @@ -1047,11 +1292,11 @@ mod tests { /// Calling a simple foreign call function that takes any string input, concatenates it with itself, and reverses the concatenation #[test] fn foreign_call_opcode_vector_input_and_output() { - let r_input_pointer = RegisterIndex::from(0); - let r_input_size = RegisterIndex::from(1); + let r_input_pointer = MemoryAddress::from(0); + let r_input_size = MemoryAddress::from(1); // We need to pass a location of appropriate size - let r_output_pointer = RegisterIndex::from(2); - let r_output_size = RegisterIndex::from(3); + let r_output_pointer = MemoryAddress::from(2); + let r_output_size = MemoryAddress::from(3); // Our first string to use the identity function with let input_string = @@ -1064,28 +1309,48 @@ mod tests { // First call: let string_double_program = vec![ - // input_pointer = 0 - Opcode::Const { destination: r_input_pointer, value: Value::from(0u128) }, + Opcode::CalldataCopy { + destination_address: MemoryAddress(4), + size: input_string.len(), + offset: 0, + }, + // input_pointer = 4 + Opcode::Const { destination: r_input_pointer, value: Value::from(4u128), bit_size: 32 }, // input_size = input_string.len() (constant here) - Opcode::Const { destination: r_input_size, value: Value::from(input_string.len()) }, - // output_pointer = 0 + input_size = input_size - Opcode::Const { destination: r_output_pointer, value: Value::from(input_string.len()) }, + Opcode::Const { + destination: r_input_size, + value: Value::from(input_string.len()), + bit_size: 32, + }, + // output_pointer = 4 + input_size + Opcode::Const { + destination: r_output_pointer, + value: Value::from(4 + input_string.len()), + bit_size: 32, + }, // output_size = input_size * 2 Opcode::Const { destination: r_output_size, value: Value::from(input_string.len() * 2), + bit_size: 32, }, // output_pointer[0..output_size] = string_double(input_pointer[0...input_size]) Opcode::ForeignCall { function: "string_double".into(), - destinations: vec![RegisterOrMemory::HeapVector(HeapVector { + destinations: vec![ValueOrArray::HeapVector(HeapVector { pointer: r_output_pointer, size: r_output_size, })], - inputs: vec![RegisterOrMemory::HeapVector(HeapVector { + destination_value_types: vec![HeapValueType::Vector { + value_types: vec![HeapValueType::Simple], + }], + inputs: vec![ValueOrArray::HeapVector(HeapVector { pointer: r_input_pointer, size: r_input_size, })], + input_value_types: vec![HeapValueType::Vector { + value_types: vec![HeapValueType::Simple], + }], }, ]; @@ -1109,10 +1374,13 @@ mod tests { brillig_execute(&mut vm); // Check that VM finished once resumed - assert_eq!(vm.status, 
VMStatus::Finished); + assert_eq!(vm.status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); // Check result in memory - let result_values = vm.memory.read_slice(input_string.len(), output_string.len()).to_vec(); + let result_values = vm + .memory + .read_slice(MemoryAddress(4 + input_string.len()), output_string.len()) + .to_vec(); assert_eq!(result_values, output_string); // Ensure the foreign call counter has been incremented @@ -1121,8 +1389,8 @@ mod tests { #[test] fn foreign_call_opcode_memory_alloc_result() { - let r_input = RegisterIndex::from(0); - let r_output = RegisterIndex::from(1); + let r_input = MemoryAddress::from(0); + let r_output = MemoryAddress::from(1); // Define a simple 2x2 matrix in memory let initial_matrix = @@ -1133,21 +1401,34 @@ mod tests { vec![Value::from(1u128), Value::from(3u128), Value::from(2u128), Value::from(4u128)]; let invert_program = vec![ + Opcode::CalldataCopy { + destination_address: MemoryAddress::from(2), + size: initial_matrix.len(), + offset: 0, + }, // input = 0 - Opcode::Const { destination: r_input, value: Value::from(0u128) }, + Opcode::Const { destination: r_input, value: Value::from(2u128), bit_size: 32 }, // output = 0 - Opcode::Const { destination: r_output, value: Value::from(4u128) }, + Opcode::Const { destination: r_output, value: Value::from(6u128), bit_size: 32 }, // *output = matrix_2x2_transpose(*input) Opcode::ForeignCall { function: "matrix_2x2_transpose".into(), - destinations: vec![RegisterOrMemory::HeapArray(HeapArray { + destinations: vec![ValueOrArray::HeapArray(HeapArray { pointer: r_output, size: initial_matrix.len(), })], - inputs: vec![RegisterOrMemory::HeapArray(HeapArray { + destination_value_types: vec![HeapValueType::Array { + size: initial_matrix.len(), + value_types: vec![HeapValueType::Simple], + }], + inputs: vec![ValueOrArray::HeapArray(HeapArray { pointer: r_input, size: initial_matrix.len(), })], + input_value_types: vec![HeapValueType::Array { + size: initial_matrix.len(), + value_types: vec![HeapValueType::Simple], + }], }, ]; @@ -1169,14 +1450,14 @@ mod tests { brillig_execute(&mut vm); // Check that VM finished once resumed - assert_eq!(vm.status, VMStatus::Finished); + assert_eq!(vm.status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); // Check initial memory still in place - let initial_values = vm.memory.read_slice(0, 4).to_vec(); + let initial_values = vm.memory.read_slice(MemoryAddress(2), 4).to_vec(); assert_eq!(initial_values, initial_matrix); // Check result in memory - let result_values = vm.memory.read_slice(4, 4).to_vec(); + let result_values = vm.memory.read_slice(MemoryAddress(6), 4).to_vec(); assert_eq!(result_values, expected_result); // Ensure the foreign call counter has been incremented @@ -1185,9 +1466,9 @@ mod tests { #[test] fn foreign_call_opcode_multiple_array_inputs_result() { - let r_input_a = RegisterIndex::from(0); - let r_input_b = RegisterIndex::from(1); - let r_output = RegisterIndex::from(2); + let r_input_a = MemoryAddress::from(0); + let r_input_b = MemoryAddress::from(1); + let r_output = MemoryAddress::from(2); // Define a simple 2x2 matrix in memory let matrix_a = @@ -1209,28 +1490,41 @@ mod tests { ]; let matrix_mul_program = vec![ - // input = 0 - Opcode::Const { destination: r_input_a, value: Value::from(0u128) }, - // input = 0 - Opcode::Const { destination: r_input_b, value: Value::from(4u128) }, + Opcode::CalldataCopy { + destination_address: MemoryAddress::from(3), + size: matrix_a.len() + matrix_b.len(), + offset: 0, 
+ }, + // input = 3 + Opcode::Const { destination: r_input_a, value: Value::from(3u128), bit_size: 32 }, + // input = 7 + Opcode::Const { destination: r_input_b, value: Value::from(7u128), bit_size: 32 }, // output = 0 - Opcode::Const { destination: r_output, value: Value::from(0u128) }, + Opcode::Const { destination: r_output, value: Value::from(0u128), bit_size: 32 }, // *output = matrix_2x2_transpose(*input) Opcode::ForeignCall { function: "matrix_2x2_transpose".into(), - destinations: vec![RegisterOrMemory::HeapArray(HeapArray { + destinations: vec![ValueOrArray::HeapArray(HeapArray { pointer: r_output, size: matrix_a.len(), })], + destination_value_types: vec![HeapValueType::Array { + size: matrix_a.len(), + value_types: vec![HeapValueType::Simple], + }], inputs: vec![ - RegisterOrMemory::HeapArray(HeapArray { - pointer: r_input_a, + ValueOrArray::HeapArray(HeapArray { pointer: r_input_a, size: matrix_a.len() }), + ValueOrArray::HeapArray(HeapArray { pointer: r_input_b, size: matrix_b.len() }), + ], + input_value_types: vec![ + HeapValueType::Array { size: matrix_a.len(), - }), - RegisterOrMemory::HeapArray(HeapArray { - pointer: r_input_b, + value_types: vec![HeapValueType::Simple], + }, + HeapValueType::Array { size: matrix_b.len(), - }), + value_types: vec![HeapValueType::Simple], + }, ], }, ]; @@ -1254,13 +1548,130 @@ mod tests { brillig_execute(&mut vm); // Check that VM finished once resumed - assert_eq!(vm.status, VMStatus::Finished); + assert_eq!(vm.status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); // Check result in memory - let result_values = vm.memory.read_slice(0, 4).to_vec(); + let result_values = vm.memory.read_slice(MemoryAddress(0), 4).to_vec(); assert_eq!(result_values, expected_result); // Ensure the foreign call counter has been incremented assert_eq!(vm.foreign_call_counter, 1); } + + #[test] + fn foreign_call_opcode_nested_arrays_and_slices_input() { + // [(1, <2,3>, [4]), (5, <6,7,8>, [9])] + + let v2 = vec![Value::from(2u128), Value::from(3u128)]; + let a4 = vec![Value::from(4u128)]; + let v6 = vec![Value::from(6u128), Value::from(7u128), Value::from(8u128)]; + let a9 = vec![Value::from(9u128)]; + + // construct memory by declaring all inner arrays/vectors first + let v2_ptr = 0u128; + let mut memory = v2.clone(); + let v2_start = memory.len(); + memory.extend(vec![Value::from(v2_ptr), Value::from(v2.len()), Value::from(1u128)]); + let a4_ptr = memory.len(); + memory.extend(a4.clone()); + let a4_start = memory.len(); + memory.extend(vec![Value::from(a4_ptr), Value::from(1u128)]); + let v6_ptr = memory.len(); + memory.extend(v6.clone()); + let v6_start = memory.len(); + memory.extend(vec![Value::from(v6_ptr), Value::from(v6.len()), Value::from(1u128)]); + let a9_ptr = memory.len(); + memory.extend(a9.clone()); + let a9_start = memory.len(); + memory.extend(vec![Value::from(a9_ptr), Value::from(1u128)]); + // finally we add the contents of the outer array + let outer_ptr = memory.len(); + let outer_array = vec![ + Value::from(1u128), + Value::from(v2.len()), + Value::from(v2_start), + Value::from(a4_start), + Value::from(5u128), + Value::from(v6.len()), + Value::from(v6_start), + Value::from(a9_start), + ]; + memory.extend(outer_array.clone()); + + let input_array_value_types = vec![ + HeapValueType::Simple, + HeapValueType::Simple, // size of following vector + HeapValueType::Vector { value_types: vec![HeapValueType::Simple] }, + HeapValueType::Array { value_types: vec![HeapValueType::Simple], size: 1 }, + ]; + + // memory address of 
the end of the above data structures + let r_ptr = memory.len(); + + let r_input = MemoryAddress::from(r_ptr); + let r_output = MemoryAddress::from(r_ptr + 1); + + let program = vec![ + Opcode::CalldataCopy { + destination_address: MemoryAddress::from(0), + size: memory.len(), + offset: 0, + }, + // input = 0 + Opcode::Const { destination: r_input, value: Value::from(outer_ptr), bit_size: 32 }, + // some_function(input) + Opcode::ForeignCall { + function: "flat_sum".into(), + destinations: vec![ValueOrArray::MemoryAddress(r_output)], + destination_value_types: vec![HeapValueType::Simple], + inputs: vec![ValueOrArray::HeapArray(HeapArray { + pointer: r_input, + size: outer_array.len(), + })], + input_value_types: vec![HeapValueType::Array { + value_types: input_array_value_types, + size: outer_array.len(), + }], + }, + ]; + + let mut vm = brillig_execute_and_get_vm(memory, &program); + + // Check that VM is waiting + assert_eq!( + vm.status, + VMStatus::ForeignCallWait { + function: "flat_sum".into(), + inputs: vec![ForeignCallParam::Array(vec![ + Value::from(1u128), + Value::from(2u128), // size of following vector + Value::from(2u128), + Value::from(3u128), + Value::from(4u128), + Value::from(5u128), + Value::from(3u128), // size of following vector + Value::from(6u128), + Value::from(7u128), + Value::from(8u128), + Value::from(9u128), + ])], + } + ); + + // Push result we're waiting for + vm.resolve_foreign_call(Value::from(45u128).into()); + + // Resume VM + brillig_execute(&mut vm); + + // Check that VM finished once resumed + assert_eq!(vm.status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); + + // Check result + let result_value = vm.memory.read(r_output); + assert_eq!(result_value, Value::from(45u128)); + + // Ensure the foreign call counter has been incremented + assert_eq!(vm.foreign_call_counter, 1); + } } diff --git a/acvm-repo/brillig_vm/src/memory.rs b/acvm-repo/brillig_vm/src/memory.rs index 8a6993f1353..d1c81447170 100644 --- a/acvm-repo/brillig_vm/src/memory.rs +++ b/acvm-repo/brillig_vm/src/memory.rs @@ -1,41 +1,45 @@ +use acir::{brillig::MemoryAddress, FieldElement}; + use crate::Value; -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct Memory { // Memory is a vector of values. // We grow the memory when values past the end are set, extending with 0s. 
inner: Vec, } -impl From> for Memory { - fn from(values: Vec) -> Self { - Memory { inner: values } - } -} - impl Memory { /// Gets the value at pointer - pub fn read(&self, ptr: usize) -> Value { - self.inner[ptr] + pub fn read(&self, ptr: MemoryAddress) -> Value { + self.inner.get(ptr.to_usize()).copied().unwrap_or(0_u128.into()) } - pub fn read_slice(&self, ptr: usize, len: usize) -> &[Value] { - &self.inner[ptr..ptr + len] + pub fn read_ref(&self, ptr: MemoryAddress) -> MemoryAddress { + MemoryAddress(self.read(ptr).to_usize()) + } + + pub fn read_slice(&self, addr: MemoryAddress, len: usize) -> &[Value] { + &self.inner[addr.to_usize()..(addr.to_usize() + len)] } /// Sets the value at pointer `ptr` to `value` - pub fn write(&mut self, ptr: usize, value: Value) { - self.write_slice(ptr, &[value]); + pub fn write(&mut self, ptr: MemoryAddress, value: Value) { + self.resize_to_fit(ptr.to_usize() + 1); + self.inner[ptr.to_usize()] = value; } - /// Sets the values after pointer `ptr` to `values` - pub fn write_slice(&mut self, ptr: usize, values: &[Value]) { + fn resize_to_fit(&mut self, size: usize) { // Calculate new memory size - let new_size = std::cmp::max(self.inner.len(), ptr + values.len()); + let new_size = std::cmp::max(self.inner.len(), size); // Expand memory to new size with default values if needed - self.inner.resize(new_size, Value::from(0_usize)); + self.inner.resize(new_size, Value::from(FieldElement::zero())); + } - self.inner[ptr..ptr + values.len()].copy_from_slice(values); + /// Sets the values after pointer `ptr` to `values` + pub fn write_slice(&mut self, ptr: MemoryAddress, values: &[Value]) { + self.resize_to_fit(ptr.to_usize() + values.len()); + self.inner[ptr.to_usize()..(ptr.to_usize() + values.len())].copy_from_slice(values); } /// Returns the values of the memory diff --git a/acvm-repo/brillig_vm/src/registers.rs b/acvm-repo/brillig_vm/src/registers.rs deleted file mode 100644 index fcc596dd6c9..00000000000 --- a/acvm-repo/brillig_vm/src/registers.rs +++ /dev/null @@ -1,43 +0,0 @@ -use acir::brillig::{RegisterIndex, Value}; - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Registers { - // Registers are a vector of values. - // We grow the register as registers past the end are set, extending with 0s. - pub inner: Vec, -} - -/// Aims to match a reasonable max register count for a SNARK prover. -/// As well, catches obvious erroneous use of registers. -/// This can be revisited if it proves not enough. -const MAX_REGISTERS: usize = 2_usize.pow(16); - -/// Registers will store field element values during the -/// duration of the execution of the bytecode. 
-impl Registers { - /// Create a Registers object initialized with definite values - pub fn load(values: Vec) -> Registers { - let inner = values.into_iter().collect(); - Self { inner } - } - - /// Gets the values at register with address `index` - pub fn get(&self, register_index: RegisterIndex) -> Value { - let index = register_index.to_usize(); - assert!(index < MAX_REGISTERS, "Reading register past maximum!"); - let value = self.inner.get(index); - match value { - Some(value) => *value, - None => 0u128.into(), - } - } - - /// Sets the value at register with address `index` to `value` - pub fn set(&mut self, RegisterIndex(index): RegisterIndex, value: Value) { - assert!(index < MAX_REGISTERS, "Writing register past maximum!"); - // if size isn't at least index + 1, resize - let new_register_size = std::cmp::max(index + 1, self.inner.len()); - self.inner.resize(new_register_size, 0u128.into()); - self.inner[index] = value; - } -} diff --git a/aztec_macros/Cargo.toml b/aztec_macros/Cargo.toml index 04f74d3b022..ed9821fabcf 100644 --- a/aztec_macros/Cargo.toml +++ b/aztec_macros/Cargo.toml @@ -11,4 +11,6 @@ repository.workspace = true [dependencies] noirc_frontend.workspace = true +noirc_errors.workspace = true iter-extended.workspace = true +convert_case = "0.6.0" diff --git a/aztec_macros/src/lib.rs b/aztec_macros/src/lib.rs index c9adece4eb5..09deb2c9712 100644 --- a/aztec_macros/src/lib.rs +++ b/aztec_macros/src/lib.rs @@ -1,5 +1,12 @@ -use iter_extended::vecmap; +use std::borrow::{Borrow, BorrowMut}; +use std::vec; +use convert_case::{Case, Casing}; +use iter_extended::vecmap; +use noirc_errors::Location; +use noirc_frontend::hir::def_collector::dc_crate::{UnresolvedFunctions, UnresolvedTraitImpl}; +use noirc_frontend::hir::def_map::{LocalModuleId, ModuleId}; +use noirc_frontend::macros_api::parse_program; use noirc_frontend::macros_api::FieldElement; use noirc_frontend::macros_api::{ BlockExpression, CallExpression, CastExpression, Distinctness, Expression, ExpressionKind, @@ -13,7 +20,8 @@ use noirc_frontend::macros_api::{ use noirc_frontend::macros_api::{CrateId, FileId}; use noirc_frontend::macros_api::{MacroError, MacroProcessor}; use noirc_frontend::macros_api::{ModuleDefId, NodeInterner, SortedModule, StructId}; - +use noirc_frontend::node_interner::{FuncId, TraitId, TraitImplId, TraitImplKind}; +use noirc_frontend::Lambda; pub struct AztecMacro; impl MacroProcessor for AztecMacro { @@ -26,38 +34,58 @@ impl MacroProcessor for AztecMacro { transform(ast, crate_id, context) } - fn process_typed_ast(&self, crate_id: &CrateId, context: &mut HirContext) { - transform_hir(crate_id, context) + fn process_unresolved_traits_impls( + &self, + crate_id: &CrateId, + context: &mut HirContext, + unresolved_traits_impls: &[UnresolvedTraitImpl], + collected_functions: &mut Vec, + ) -> Result<(), (MacroError, FileId)> { + if has_aztec_dependency(crate_id, context) { + inject_compute_note_hash_and_nullifier( + crate_id, + context, + unresolved_traits_impls, + collected_functions, + ) + } else { + Ok(()) + } + } + + fn process_typed_ast( + &self, + crate_id: &CrateId, + context: &mut HirContext, + ) -> Result<(), (MacroError, FileId)> { + transform_hir(crate_id, context).map_err(|(err, file_id)| (err.into(), file_id)) } } const FUNCTION_TREE_HEIGHT: u32 = 5; -const MAX_CONTRACT_FUNCTIONS: usize = 2_usize.pow(FUNCTION_TREE_HEIGHT); +const MAX_CONTRACT_PRIVATE_FUNCTIONS: usize = 2_usize.pow(FUNCTION_TREE_HEIGHT); #[derive(Debug, Clone)] pub enum AztecMacroError { AztecDepNotFound, - 
ComputeNoteHashAndNullifierNotFound { span: Span }, - ContractHasTooManyFunctions { span: Span }, + ContractHasTooManyPrivateFunctions { span: Span }, ContractConstructorMissing { span: Span }, UnsupportedFunctionArgumentType { span: Span, typ: UnresolvedTypeData }, + UnsupportedStorageType { span: Option, typ: UnresolvedTypeData }, + CouldNotAssignStorageSlots { secondary_message: Option }, + EventError { span: Span, message: String }, } impl From for MacroError { fn from(err: AztecMacroError) -> Self { match err { AztecMacroError::AztecDepNotFound {} => MacroError { - primary_message: "Aztec dependency not found. Please add aztec as a dependency in your Cargo.toml. For more information go to https://docs.aztec.network/dev_docs/debugging/aztecnr-errors#aztec-dependency-not-found-please-add-aztec-as-a-dependency-in-your-nargotoml".to_owned(), + primary_message: "Aztec dependency not found. Please add aztec as a dependency in your Cargo.toml. For more information go to https://docs.aztec.network/developers/debugging/aztecnr-errors#aztec-dependency-not-found-please-add-aztec-as-a-dependency-in-your-nargotoml".to_owned(), secondary_message: None, span: None, }, - AztecMacroError::ComputeNoteHashAndNullifierNotFound { span } => MacroError { - primary_message: "compute_note_hash_and_nullifier function not found. Define it in your contract. For more information go to https://docs.aztec.network/dev_docs/debugging/aztecnr-errors#compute_note_hash_and_nullifier-function-not-found-define-it-in-your-contract".to_owned(), - secondary_message: None, - span: Some(span), - }, - AztecMacroError::ContractHasTooManyFunctions { span } => MacroError { - primary_message: format!("Contract can only have a maximum of {} functions", MAX_CONTRACT_FUNCTIONS), + AztecMacroError::ContractHasTooManyPrivateFunctions { span } => MacroError { + primary_message: format!("Contract can only have a maximum of {} private functions", MAX_CONTRACT_PRIVATE_FUNCTIONS), secondary_message: None, span: Some(span), }, @@ -71,6 +99,21 @@ impl From for MacroError { secondary_message: None, span: Some(span), }, + AztecMacroError::UnsupportedStorageType { span, typ } => MacroError { + primary_message: format!("Provided storage type `{typ:?}` is not directly supported in Aztec. 
Please provide a custom storage implementation"), + secondary_message: None, + span, + }, + AztecMacroError::CouldNotAssignStorageSlots { secondary_message } => MacroError { + primary_message: "Could not assign storage slots, please provide a custom storage implementation".to_string(), + secondary_message, + span: None, + }, + AztecMacroError::EventError { span, message } => MacroError { + primary_message: message, + secondary_message: None, + span: Some(span), + }, } } } @@ -123,7 +166,7 @@ fn pattern(name: &str) -> Pattern { } fn mutable(name: &str) -> Pattern { - Pattern::Mutable(Box::new(pattern(name)), Span::default()) + Pattern::Mutable(Box::new(pattern(name)), Span::default(), true) } fn mutable_assignment(name: &str, assigned_to: Expression) -> Statement { @@ -156,7 +199,28 @@ fn member_access(lhs: &str, rhs: &str) -> Expression { }))) } +fn return_type(path: Path) -> FunctionReturnType { + let ty = make_type(UnresolvedTypeData::Named(path, vec![], true)); + FunctionReturnType::Ty(ty) +} + +fn lambda(parameters: Vec<(Pattern, UnresolvedType)>, body: Expression) -> Expression { + expression(ExpressionKind::Lambda(Box::new(Lambda { + parameters, + return_type: UnresolvedType { + typ: UnresolvedTypeData::Unspecified, + span: Some(Span::default()), + }, + body, + }))) +} + macro_rules! chained_path { + ( $base:expr ) => { + { + ident_path($base) + } + }; ( $base:expr $(, $tail:expr)* ) => { { let mut base_path = ident_path($base); @@ -186,7 +250,7 @@ fn cast(lhs: Expression, ty: UnresolvedTypeData) -> Expression { } fn make_type(typ: UnresolvedTypeData) -> UnresolvedType { - UnresolvedType { typ, span: None } + UnresolvedType { typ, span: Some(Span::default()) } } fn index_array(array: Ident, index: &str) -> Expression { @@ -237,8 +301,12 @@ fn transform( // /// Completes the Hir with data gathered from type resolution -fn transform_hir(crate_id: &CrateId, context: &mut HirContext) { - transform_events(crate_id, context); +fn transform_hir( + crate_id: &CrateId, + context: &mut HirContext, +) -> Result<(), (AztecMacroError, FileId)> { + transform_events(crate_id, context)?; + assign_storage_slots(crate_id, context) } /// Includes an import to the aztec library if it has not been included yet @@ -261,41 +329,57 @@ fn check_for_aztec_dependency( crate_id: &CrateId, context: &HirContext, ) -> Result<(), (MacroError, FileId)> { - let crate_graph = &context.crate_graph[crate_id]; - let has_aztec_dependency = crate_graph.dependencies.iter().any(|dep| dep.as_name() == "aztec"); - if has_aztec_dependency { + if has_aztec_dependency(crate_id, context) { Ok(()) } else { - Err((AztecMacroError::AztecDepNotFound.into(), crate_graph.root_file_id)) + Err((AztecMacroError::AztecDepNotFound.into(), context.crate_graph[crate_id].root_file_id)) } } +fn has_aztec_dependency(crate_id: &CrateId, context: &HirContext) -> bool { + context.crate_graph[crate_id].dependencies.iter().any(|dep| dep.as_name() == "aztec") +} + // Check to see if the user has defined a storage struct fn check_for_storage_definition(module: &SortedModule) -> bool { module.types.iter().any(|r#struct| r#struct.name.0.contents == "Storage") } -// Check if "compute_note_hash_and_nullifier(AztecAddress,Field,Field,[Field; N]) -> [Field; 4]" is defined -fn check_for_compute_note_hash_and_nullifier_definition(module: &SortedModule) -> bool { - module.functions.iter().any(|func| { - func.def.name.0.contents == "compute_note_hash_and_nullifier" - && func.def.parameters.len() == 4 - && match &func.def.parameters[0].typ.typ { - 
UnresolvedTypeData::Named(path, _) => path.segments.last().unwrap().0.contents == "AztecAddress", +// Check to see if the user has defined a storage struct +fn check_for_storage_implementation(module: &SortedModule) -> bool { + module.impls.iter().any(|r#impl| match &r#impl.object_type.typ { + UnresolvedTypeData::Named(path, _, _) => { + path.segments.last().is_some_and(|segment| segment.0.contents == "Storage") + } + _ => false, + }) +} + +// Check if "compute_note_hash_and_nullifier(AztecAddress,Field,Field,Field,[Field; N]) -> [Field; 4]" is defined +fn check_for_compute_note_hash_and_nullifier_definition( + functions_data: &[(LocalModuleId, FuncId, NoirFunction)], + module_id: LocalModuleId, +) -> bool { + functions_data.iter().filter(|func_data| func_data.0 == module_id).any(|func_data| { + func_data.2.def.name.0.contents == "compute_note_hash_and_nullifier" + && func_data.2.def.parameters.len() == 5 + && match &func_data.2.def.parameters[0].typ.typ { + UnresolvedTypeData::Named(path, _, _) => path.segments.last().unwrap().0.contents == "AztecAddress", _ => false, } - && func.def.parameters[1].typ.typ == UnresolvedTypeData::FieldElement - && func.def.parameters[2].typ.typ == UnresolvedTypeData::FieldElement - // checks if the 4th parameter is an array and the Box in + && func_data.2.def.parameters[1].typ.typ == UnresolvedTypeData::FieldElement + && func_data.2.def.parameters[2].typ.typ == UnresolvedTypeData::FieldElement + && func_data.2.def.parameters[3].typ.typ == UnresolvedTypeData::FieldElement + // checks if the 5th parameter is an array and the Box in // Array(Option, Box) contains only fields - && match &func.def.parameters[3].typ.typ { + && match &func_data.2.def.parameters[4].typ.typ { UnresolvedTypeData::Array(_, inner_type) => { matches!(inner_type.typ, UnresolvedTypeData::FieldElement) }, _ => false, } - // We check the return type the same way as we did the 4th parameter - && match &func.def.return_type { + // We check the return type the same way as we did the 5th parameter + && match &func_data.2.def.return_type { FunctionReturnType::Default(_) => false, FunctionReturnType::Ty(unresolved_type) => { match &unresolved_type.typ { @@ -330,13 +414,12 @@ fn transform_module( // Check for a user defined storage struct let storage_defined = check_for_storage_definition(module); + let storage_implemented = check_for_storage_implementation(module); + + let crate_graph = &context.crate_graph[crate_id]; - if storage_defined && !check_for_compute_note_hash_and_nullifier_definition(module) { - let crate_graph = &context.crate_graph[crate_id]; - return Err(( - AztecMacroError::ComputeNoteHashAndNullifierNotFound { span: Span::default() }, - crate_graph.root_file_id, - )); + if storage_defined && !storage_implemented { + generate_storage_implementation(module).map_err(|err| (err, crate_graph.root_file_id))?; } for structure in module.types.iter() { @@ -357,6 +440,10 @@ fn transform_module( transform_function("Public", func, storage_defined) .map_err(|err| (err, crate_graph.root_file_id))?; has_transformed_module = true; + } else if is_custom_attribute(&secondary_attribute, "aztec(public-vm)") { + transform_vm_function(func, storage_defined) + .map_err(|err| (err, crate_graph.root_file_id))?; + has_transformed_module = true; } } // Add the storage struct to the beginning of the function if it is unconstrained in an aztec contract @@ -369,10 +456,22 @@ fn transform_module( if has_transformed_module { // We only want to run these checks if the macro processor has found the module to be an 
Aztec contract. - if module.functions.len() > MAX_CONTRACT_FUNCTIONS { + let private_functions_count = module + .functions + .iter() + .filter(|func| { + func.def + .attributes + .secondary + .iter() + .any(|attr| is_custom_attribute(attr, "aztec(private)")) + }) + .count(); + + if private_functions_count > MAX_CONTRACT_PRIVATE_FUNCTIONS { let crate_graph = &context.crate_graph[crate_id]; return Err(( - AztecMacroError::ContractHasTooManyFunctions { span: Span::default() }, + AztecMacroError::ContractHasTooManyPrivateFunctions { span: Span::default() }, crate_graph.root_file_id, )); } @@ -390,10 +489,140 @@ fn transform_module( Ok(has_transformed_module) } +/// Auxiliary function to generate the storage constructor for a given field, using +/// the Storage definition as a reference. Supports nesting. +fn generate_storage_field_constructor( + (type_ident, unresolved_type): &(Ident, UnresolvedType), + slot: Expression, +) -> Result { + let typ = &unresolved_type.typ; + match typ { + UnresolvedTypeData::Named(path, generics, _) => { + let mut new_path = path.clone().to_owned(); + new_path.segments.push(ident("new")); + match path.segments.last().unwrap().0.contents.as_str() { + "Map" => Ok(call( + variable_path(new_path), + vec![ + variable("context"), + slot, + lambda( + vec![ + ( + pattern("context"), + make_type(UnresolvedTypeData::Named( + chained_path!("aztec", "context", "Context"), + vec![], + true, + )), + ), + ( + Pattern::Identifier(ident("slot")), + make_type(UnresolvedTypeData::FieldElement), + ), + ], + generate_storage_field_constructor( + &(type_ident.clone(), generics.iter().last().unwrap().clone()), + variable("slot"), + )?, + ), + ], + )), + _ => Ok(call(variable_path(new_path), vec![variable("context"), slot])), + } + } + _ => Err(AztecMacroError::UnsupportedStorageType { + typ: typ.clone(), + span: Some(type_ident.span()), + }), + } +} + +// Generates the Storage implementation block from the Storage struct definition if it does not exist +/// From: +/// +/// struct Storage { +/// a_map: Map>, +/// a_nested_map: Map>>, +/// a_field: SomeStoragePrimitive, +/// } +/// +/// To: +/// +/// impl Storage { +/// fn init(context: Context) -> Self { +/// Storage { +/// a_map: Map::new(context, 0, |context, slot| { +/// SomeStoragePrimitive::new(context, slot) +/// }), +/// a_nested_map: Map::new(context, 0, |context, slot| { +/// Map::new(context, slot, |context, slot| { +/// SomeStoragePrimitive::new(context, slot) +/// }) +/// }), +/// a_field: SomeStoragePrimitive::new(context, 0), +/// } +/// } +/// } +/// +/// Storage slots are generated as 0 and will be populated using the information from the HIR +/// at a later stage. 
+fn generate_storage_implementation(module: &mut SortedModule) -> Result<(), AztecMacroError> { + let definition = + module.types.iter().find(|r#struct| r#struct.name.0.contents == "Storage").unwrap(); + + let slot_zero = expression(ExpressionKind::Literal(Literal::Integer( + FieldElement::from(i128::from(0)), + false, + ))); + + let field_constructors = definition + .fields + .iter() + .flat_map(|field| { + generate_storage_field_constructor(field, slot_zero.clone()) + .map(|expression| (field.0.clone(), expression)) + }) + .collect(); + + let storage_constructor_statement = make_statement(StatementKind::Expression(expression( + ExpressionKind::constructor((chained_path!("Storage"), field_constructors)), + ))); + + let init = NoirFunction::normal(FunctionDefinition::normal( + &ident("init"), + &vec![], + &[( + ident("context"), + make_type(UnresolvedTypeData::Named( + chained_path!("aztec", "context", "Context"), + vec![], + true, + )), + )], + &BlockExpression(vec![storage_constructor_statement]), + &[], + &return_type(chained_path!("Self")), + )); + + let storage_impl = TypeImpl { + object_type: UnresolvedType { + typ: UnresolvedTypeData::Named(chained_path!("Storage"), vec![], true), + span: Some(Span::default()), + }, + type_span: Span::default(), + generics: vec![], + methods: vec![(init, Span::default())], + }; + module.impls.push(storage_impl); + + Ok(()) +} + /// If it does, it will insert the following things: /// - A new Input that is provided for a kernel app circuit, named: {Public/Private}ContextInputs /// - Hashes all of the function input variables -/// - This instantiates a helper function +/// - This instantiates a helper function fn transform_function( ty: &str, func: &mut NoirFunction, @@ -419,6 +648,10 @@ fn transform_function( // Abstract return types such that they get added to the kernel's return_values if let Some(return_values) = abstract_return_values(func) { + // In case we are pushing return values to the context, we remove the statement that originated it + // This avoids running duplicate code, since blocks like if/else can be value returning statements + func.def.body.0.pop(); + // Add the new return statement func.def.body.0.push(return_values); } @@ -441,6 +674,24 @@ fn transform_function( Ok(()) } +/// Transform a function to work with AVM bytecode +fn transform_vm_function( + func: &mut NoirFunction, + _storage_defined: bool, +) -> Result<(), AztecMacroError> { + // Push Avm context creation to the beginning of the function + let create_context = create_avm_context()?; + func.def.body.0.insert(0, create_context); + + // We want the function to be seen as a public function + func.def.is_open = true; + + // NOTE: the line below is a temporary hack to trigger external transpilation tools + // It will be removed once the transpiler is integrated into the Noir compiler + func.def.name.0.contents = format!("avm_{}", func.def.name.0.contents); + Ok(()) +} + /// Transform Unconstrained /// /// Inserts the following code at the beginning of an unconstrained function @@ -471,20 +722,48 @@ fn collect_crate_structs(crate_id: &CrateId, context: &HirContext) -> Vec Vec { + let crates = context.crates(); + crates + .flat_map(|crate_id| context.def_map(&crate_id).map(|def_map| def_map.modules())) + .flatten() + .flat_map(|module| { + module.type_definitions().filter_map(|typ| { + if let ModuleDefId::TraitId(struct_id) = typ { + Some(struct_id) + } else { + None + } + }) + }) + .collect() +} + /// Substitutes the signature literal that was introduced in the selector 
method previously with the actual signature. -fn transform_event(struct_id: StructId, interner: &mut NodeInterner) { +fn transform_event( + struct_id: StructId, + interner: &mut NodeInterner, +) -> Result<(), (AztecMacroError, FileId)> { let struct_type = interner.get_struct(struct_id); let selector_id = interner - .lookup_method(&Type::Struct(struct_type, vec![]), struct_id, "selector", false) - .expect("Selector method not found"); + .lookup_method(&Type::Struct(struct_type.clone(), vec![]), struct_id, "selector", false) + .ok_or_else(|| { + let error = AztecMacroError::EventError { + span: struct_type.borrow().location.span, + message: "Selector method not found".to_owned(), + }; + (error, struct_type.borrow().location.file) + })?; let selector_function = interner.function(&selector_id); let compute_selector_statement = interner.statement( - selector_function - .block(interner) - .statements() - .first() - .expect("Compute selector statement not found"), + selector_function.block(interner).statements().first().ok_or_else(|| { + let error = AztecMacroError::EventError { + span: struct_type.borrow().location.span, + message: "Compute selector statement not found".to_owned(), + }; + (error, struct_type.borrow().location.file) + })?, ); let compute_selector_expression = match compute_selector_statement { @@ -494,22 +773,31 @@ fn transform_event(struct_id: StructId, interner: &mut NodeInterner) { }, _ => None, } - .expect("Compute selector statement is not a call expression"); - - let first_arg_id = compute_selector_expression - .arguments - .first() - .expect("Missing argument for compute selector"); + .ok_or_else(|| { + let error = AztecMacroError::EventError { + span: struct_type.borrow().location.span, + message: "Compute selector statement is not a call expression".to_owned(), + }; + (error, struct_type.borrow().location.file) + })?; + + let first_arg_id = compute_selector_expression.arguments.first().ok_or_else(|| { + let error = AztecMacroError::EventError { + span: struct_type.borrow().location.span, + message: "Compute selector statement is not a call expression".to_owned(), + }; + (error, struct_type.borrow().location.file) + })?; match interner.expression(first_arg_id) { HirExpression::Literal(HirLiteral::Str(signature)) if signature == SIGNATURE_PLACEHOLDER => { - let selector_literal_id = first_arg_id; + let selector_literal_id = *first_arg_id; let structure = interner.get_struct(struct_id); let signature = event_signature(&structure.borrow()); - interner.update_expression(*selector_literal_id, |expr| { + interner.update_expression(selector_literal_id, |expr| { *expr = HirExpression::Literal(HirLiteral::Str(signature.clone())); }); @@ -518,18 +806,205 @@ fn transform_event(struct_id: StructId, interner: &mut NodeInterner) { selector_literal_id, Type::String(Box::new(Type::Constant(signature.len() as u64))), ); + Ok(()) } - _ => unreachable!("Signature placeholder literal does not match"), + _ => Err(( + AztecMacroError::EventError { + span: struct_type.borrow().location.span, + message: "Signature placeholder literal does not match".to_owned(), + }, + struct_type.borrow().location.file, + )), } } -fn transform_events(crate_id: &CrateId, context: &mut HirContext) { +fn transform_events( + crate_id: &CrateId, + context: &mut HirContext, +) -> Result<(), (AztecMacroError, FileId)> { for struct_id in collect_crate_structs(crate_id, context) { let attributes = context.def_interner.struct_attributes(&struct_id); if attributes.iter().any(|attr| matches!(attr, 
SecondaryAttribute::Event)) { - transform_event(struct_id, &mut context.def_interner); + transform_event(struct_id, &mut context.def_interner)?; } } + Ok(()) +} + +/// Obtains the serialized length of a type that implements the Serialize trait. +fn get_serialized_length( + traits: &[TraitId], + typ: &Type, + interner: &NodeInterner, +) -> Result { + let (struct_name, maybe_stored_in_state) = match typ { + Type::Struct(struct_type, generics) => { + Ok((struct_type.borrow().name.0.contents.clone(), generics.first())) + } + _ => Err(AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some("State storage variable must be a struct".to_string()), + }), + }?; + let stored_in_state = + maybe_stored_in_state.ok_or(AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some("State storage variable must be generic".to_string()), + })?; + + let is_note = traits.iter().any(|&trait_id| { + let r#trait = interner.get_trait(trait_id); + r#trait.name.0.contents == "NoteInterface" + && !interner.lookup_all_trait_implementations(stored_in_state, trait_id).is_empty() + }); + + // Maps and (private) Notes always occupy a single slot. Someone could store a Note in PublicMutable for whatever reason though. + if struct_name == "Map" || (is_note && struct_name != "PublicMutable") { + return Ok(1); + } + + let serialized_trait_impl_kind = traits + .iter() + .find_map(|&trait_id| { + let r#trait = interner.get_trait(trait_id); + if r#trait.borrow().name.0.contents == "Serialize" + && r#trait.borrow().generics.len() == 1 + { + interner + .lookup_all_trait_implementations(stored_in_state, trait_id) + .into_iter() + .next() + } else { + None + } + }) + .ok_or(AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some("Stored data must implement Serialize trait".to_string()), + })?; + + let serialized_trait_impl_id = match serialized_trait_impl_kind { + TraitImplKind::Normal(trait_impl_id) => Ok(trait_impl_id), + _ => Err(AztecMacroError::CouldNotAssignStorageSlots { secondary_message: None }), + }?; + + let serialized_trait_impl_shared = interner.get_trait_implementation(*serialized_trait_impl_id); + let serialized_trait_impl = serialized_trait_impl_shared.borrow(); + + match serialized_trait_impl.trait_generics.first().unwrap() { + Type::Constant(value) => Ok(*value), + _ => Err(AztecMacroError::CouldNotAssignStorageSlots { secondary_message: None }), + } +} + +/// Assigns storage slots to the storage struct fields based on the serialized length of the types. 
This automatic assignment +/// will only trigger if the assigned storage slot is invalid (0 as generated by generate_storage_implementation) +fn assign_storage_slots( + crate_id: &CrateId, + context: &mut HirContext, +) -> Result<(), (AztecMacroError, FileId)> { + let traits: Vec<_> = collect_traits(context); + for struct_id in collect_crate_structs(crate_id, context) { + let interner: &mut NodeInterner = context.def_interner.borrow_mut(); + let r#struct = interner.get_struct(struct_id); + let file_id = r#struct.borrow().location.file; + if r#struct.borrow().name.0.contents == "Storage" && r#struct.borrow().id.krate().is_root() + { + let init_id = interner + .lookup_method( + &Type::Struct(interner.get_struct(struct_id), vec![]), + struct_id, + "init", + false, + ) + .ok_or(( + AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some( + "Storage struct must have an init function".to_string(), + ), + }, + file_id, + ))?; + let init_function = interner.function(&init_id).block(interner); + let init_function_statement_id = init_function.statements().first().ok_or(( + AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some("Init storage statement not found".to_string()), + }, + file_id, + ))?; + let storage_constructor_statement = interner.statement(init_function_statement_id); + + let storage_constructor_expression = match storage_constructor_statement { + HirStatement::Expression(expression_id) => { + match interner.expression(&expression_id) { + HirExpression::Constructor(hir_constructor_expression) => { + Ok(hir_constructor_expression) + } + _ => Err((AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some( + "Storage constructor statement must be a constructor expression" + .to_string(), + ), + }, file_id)) + } + } + _ => Err(( + AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some( + "Storage constructor statement must be an expression".to_string(), + ), + }, + file_id, + )), + }?; + + let mut storage_slot: u64 = 1; + for (index, (_, expr_id)) in storage_constructor_expression.fields.iter().enumerate() { + let fields = r#struct.borrow().get_fields(&[]); + let (_, field_type) = fields.get(index).unwrap(); + let new_call_expression = match interner.expression(expr_id) { + HirExpression::Call(hir_call_expression) => Ok(hir_call_expression), + _ => Err(( + AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some( + "Storage field initialization expression is not a call expression" + .to_string(), + ), + }, + file_id, + )), + }?; + + let slot_arg_expression = interner.expression(&new_call_expression.arguments[1]); + + let current_storage_slot = match slot_arg_expression { + HirExpression::Literal(HirLiteral::Integer(slot, _)) => Ok(slot.to_u128()), + _ => Err(( + AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some( + "Storage slot argument expression must be a literal integer" + .to_string(), + ), + }, + file_id, + )), + }?; + + if current_storage_slot != 0 { + continue; + } + + let type_serialized_len = get_serialized_length(&traits, field_type, interner) + .map_err(|err| (err, file_id))?; + interner.update_expression(new_call_expression.arguments[1], |expr| { + *expr = HirExpression::Literal(HirLiteral::Integer( + FieldElement::from(u128::from(storage_slot)), + false, + )); + }); + + storage_slot += type_serialized_len; + } + } + } + Ok(()) } const SIGNATURE_PLACEHOLDER: &str = "SIGNATURE_PLACEHOLDER"; @@ -549,7 +1024,8 @@ const SIGNATURE_PLACEHOLDER: &str = "SIGNATURE_PLACEHOLDER"; /// The 
signature cannot be known at this point since types are not resolved yet, so we use a signature placeholder. /// It'll get resolved after by transforming the HIR. fn generate_selector_impl(structure: &NoirStruct) -> TypeImpl { - let struct_type = make_type(UnresolvedTypeData::Named(path(structure.name.clone()), vec![])); + let struct_type = + make_type(UnresolvedTypeData::Named(path(structure.name.clone()), vec![], true)); let selector_path = chained_path!("aztec", "protocol_types", "abis", "function_selector", "FunctionSelector"); @@ -563,7 +1039,7 @@ fn generate_selector_impl(structure: &NoirStruct) -> TypeImpl { // Define `FunctionSelector` return type let return_type = - FunctionReturnType::Ty(make_type(UnresolvedTypeData::Named(selector_path, vec![]))); + FunctionReturnType::Ty(make_type(UnresolvedTypeData::Named(selector_path, vec![], true))); let mut selector_fn_def = FunctionDefinition::normal( &ident("selector"), @@ -583,7 +1059,7 @@ fn generate_selector_impl(structure: &NoirStruct) -> TypeImpl { object_type: struct_type, type_span: structure.span, generics: vec![], - methods: vec![NoirFunction::normal(selector_fn_def)], + methods: vec![(NoirFunction::normal(selector_fn_def), Span::default())], } } @@ -605,8 +1081,11 @@ fn generate_selector_impl(structure: &NoirStruct) -> TypeImpl { fn create_inputs(ty: &str) -> Param { let context_ident = ident("inputs"); let context_pattern = Pattern::Identifier(context_ident); - let type_path = chained_path!("aztec", "abi", ty); - let context_type = make_type(UnresolvedTypeData::Named(type_path, vec![])); + + let path_snippet = ty.to_case(Case::Snake); // e.g. private_context_inputs + let type_path = chained_path!("aztec", "context", "inputs", &path_snippet, ty); + + let context_type = make_type(UnresolvedTypeData::Named(type_path, vec![], true)); let visibility = Visibility::Private; Param { pattern: context_pattern, typ: context_type, visibility, span: Span::default() } @@ -644,8 +1123,8 @@ fn create_context(ty: &str, params: &[Param]) -> Result, AztecMac let let_hasher = mutable_assignment( "hasher", // Assigned to call( - variable_path(chained_path!("aztec", "abi", "Hasher", "new")), // Path - vec![], // args + variable_path(chained_path!("aztec", "hasher", "Hasher", "new")), // Path + vec![], // args ), ); @@ -677,7 +1156,10 @@ fn create_context(ty: &str, params: &[Param]) -> Result, AztecMac add_array_to_hasher( &id, &UnresolvedType { - typ: UnresolvedTypeData::Integer(Signedness::Unsigned, 32), + typ: UnresolvedTypeData::Integer( + Signedness::Unsigned, + noirc_frontend::IntegerBitSize::ThirtyTwo, + ), span: None, }, ) @@ -704,12 +1186,14 @@ fn create_context(ty: &str, params: &[Param]) -> Result, AztecMac vec![], // args ); + let path_snippet = ty.to_case(Case::Snake); // e.g. 
private_context + // let mut context = {ty}::new(inputs, hash); let let_context = mutable_assignment( "context", // Assigned to call( - variable_path(chained_path!("aztec", "context", ty, "new")), // Path - vec![inputs_expression, hash_call], // args + variable_path(chained_path!("aztec", "context", &path_snippet, ty, "new")), // Path + vec![inputs_expression, hash_call], // args ), ); injected_expressions.push(let_context); @@ -718,6 +1202,35 @@ fn create_context(ty: &str, params: &[Param]) -> Result, AztecMac Ok(injected_expressions) } +/// Creates an mutable avm context +/// +/// ```noir +/// /// Before +/// #[aztec(public-vm)] +/// fn foo() -> Field { +/// let mut context = aztec::context::AVMContext::new(); +/// let timestamp = context.timestamp(); +/// // ... +/// } +/// +/// /// After +/// #[aztec(private)] +/// fn foo() -> Field { +/// let mut timestamp = context.timestamp(); +/// // ... +/// } +fn create_avm_context() -> Result { + let let_context = mutable_assignment( + "context", // Assigned to + call( + variable_path(chained_path!("aztec", "context", "AVMContext", "new")), // Path + vec![], // args + ), + ); + + Ok(let_context) +} + /// Abstract Return Type /// /// This function intercepts the function's current return type and replaces it with pushes @@ -727,7 +1240,7 @@ fn create_context(ty: &str, params: &[Param]) -> Result, AztecMac /// ```noir /// /// Before /// #[aztec(private)] -/// fn foo() -> abi::PrivateCircuitPublicInputs { +/// fn foo() -> protocol_types::abis::private_circuit_public_inputs::PrivateCircuitPublicInputs { /// // ... /// let my_return_value: Field = 10; /// context.return_values.push(my_return_value); @@ -746,13 +1259,11 @@ fn create_context(ty: &str, params: &[Param]) -> Result, AztecMac /// Any primitive type that can be cast will be casted to a field and pushed to the context. fn abstract_return_values(func: &NoirFunction) -> Option { let current_return_type = func.return_type().typ; - let len = func.def.body.len(); - let last_statement = &func.def.body.0[len - 1]; + let last_statement = func.def.body.0.last()?; // TODO: (length, type) => We can limit the size of the array returned to be limited by kernel size // Doesn't need done until we have settled on a kernel size // TODO: support tuples here and in inputs -> convert into an issue - // Check if the return type is an expression, if it is, we can handle it match last_statement { Statement { kind: StatementKind::Expression(expression), .. 
} => { @@ -846,11 +1357,11 @@ fn make_return_push(push_value: Expression) -> Statement { /// Make Return push array /// /// Translates to: -/// `context.return_values.push_array({push_value})` -fn make_return_push_array(push_value: Expression) -> Statement { +/// `context.return_values.extend_from_array({push_value})` +fn make_return_extend_from_array(push_value: Expression) -> Statement { make_statement(StatementKind::Semi(method_call( context_return_values(), - "push_array", + "extend_from_array", vec![push_value], ))) } @@ -859,14 +1370,14 @@ fn make_return_push_array(push_value: Expression) -> Statement { /// /// Translates to: /// ```noir -/// `context.return_values.push_array({push_value}.serialize())` +/// `context.return_values.extend_from_array({push_value}.serialize())` fn make_struct_return_type(expression: Expression) -> Statement { let serialized_call = method_call( expression, // variable "serialize", // method name vec![], // args ); - make_return_push_array(serialized_call) + make_return_extend_from_array(serialized_call) } /// Make array return type @@ -903,8 +1414,8 @@ fn make_castable_return_type(expression: Expression) -> Statement { /// Create Return Type /// -/// Public functions return abi::PublicCircuitPublicInputs while -/// private functions return abi::PrivateCircuitPublicInputs +/// Public functions return protocol_types::abis::public_circuit_public_inputs::PublicCircuitPublicInputs while +/// private functions return protocol_types::abis::private_circuit_public_inputs::::PrivateCircuitPublicInputs /// /// This call constructs an ast token referencing the above types /// The name is set in the function above `transform`, hence the @@ -914,7 +1425,7 @@ fn make_castable_return_type(expression: Expression) -> Statement { /// ```noir /// /// /// Before -/// fn foo() -> abi::PrivateCircuitPublicInputs { +/// fn foo() -> protocol_types::abis::private_circuit_public_inputs::PrivateCircuitPublicInputs { /// // ... /// } /// @@ -924,10 +1435,9 @@ fn make_castable_return_type(expression: Expression) -> Statement { /// // ... /// } fn create_return_type(ty: &str) -> FunctionReturnType { - let return_path = chained_path!("aztec", "abi", ty); - - let ty = make_type(UnresolvedTypeData::Named(return_path, vec![])); - FunctionReturnType::Ty(ty) + let path_snippet = ty.to_case(Case::Snake); // e.g. private_circuit_public_inputs or public_circuit_public_inputs + let return_path = chained_path!("aztec", "protocol_types", "abis", &path_snippet, ty); + return_type(return_path) } /// Create Context Finish @@ -938,7 +1448,7 @@ fn create_return_type(ty: &str) -> FunctionReturnType { /// The replaced code: /// ```noir /// /// Before -/// fn foo() -> abi::PrivateCircuitPublicInputs { +/// fn foo() -> protocol_types::abis::private_circuit_public_inputs::PrivateCircuitPublicInputs { /// // ... /// context.finish() /// } @@ -1119,3 +1629,194 @@ fn event_signature(event: &StructType) -> String { let fields = vecmap(event.get_fields(&[]), |(_, typ)| signature_of_type(&typ)); format!("{}({})", event.name.0.contents, fields.join(",")) } + +fn inject_compute_note_hash_and_nullifier( + crate_id: &CrateId, + context: &mut HirContext, + unresolved_traits_impls: &[UnresolvedTraitImpl], + collected_functions: &mut [UnresolvedFunctions], +) -> Result<(), (MacroError, FileId)> { + // We first fetch modules in this crate which correspond to contracts, along with their file id. 
+ let contract_module_file_ids: Vec<(LocalModuleId, FileId)> = context + .def_map(crate_id) + .expect("ICE: Missing crate in def_map") + .modules() + .iter() + .filter(|(_, module)| module.is_contract) + .map(|(idx, module)| (LocalModuleId(idx), module.location.file)) + .collect(); + + // If the current crate does not contain a contract module we simply skip it. + if contract_module_file_ids.is_empty() { + return Ok(()); + } else if contract_module_file_ids.len() != 1 { + panic!("Found multiple contracts in the same crate"); + } + + let (module_id, file_id) = contract_module_file_ids[0]; + + // If compute_note_hash_and_nullifier is already defined by the user, we skip auto-generation in order to provide an + // escape hatch for this mechanism. + // TODO(#4647): improve this diagnosis and error messaging. + if collected_functions.iter().any(|coll_funcs_data| { + check_for_compute_note_hash_and_nullifier_definition(&coll_funcs_data.functions, module_id) + }) { + return Ok(()); + } + + // In order to implement compute_note_hash_and_nullifier, we need to know all of the different note types the + // contract might use. These are the types that implement the NoteInterface trait, which provides the + // get_note_type_id function. + let note_types = fetch_struct_trait_impls(context, unresolved_traits_impls, "NoteInterface"); + + // We can now generate a version of compute_note_hash_and_nullifier tailored for the contract in this crate. + let func = generate_compute_note_hash_and_nullifier(&note_types); + + // And inject the newly created function into the contract. + + // TODO(#4373): We don't have a reasonable location for the source code of this autogenerated function, so we simply + // pass an empty span. This function should not produce errors anyway so this should not matter. + let location = Location::new(Span::empty(0), file_id); + + // These are the same things the ModCollector does when collecting functions: we push the function to the + // NodeInterner, declare it in the module (which checks for duplicate definitions), and finally add it to the list + // of collected but unresolved functions. + + let func_id = context.def_interner.push_empty_fn(); + context.def_interner.push_function( + func_id, + &func.def, + ModuleId { krate: *crate_id, local_id: module_id }, + location, + ); + + context.def_map_mut(crate_id).unwrap() + .modules_mut()[module_id.0] + .declare_function( + func.name_ident().clone(), func_id + ).expect( + "Failed to declare the autogenerated compute_note_hash_and_nullifier function, likely due to a duplicate definition. See https://github.com/AztecProtocol/aztec-packages/issues/4647." + ); + + collected_functions + .iter_mut() + .find(|fns| fns.file_id == file_id) + .expect("ICE: no functions found in contract file") + .push_fn(module_id, func_id, func.clone()); + + Ok(()) +} + +// Fetches the names of all structs that implement trait_name, both in the current crate and all of its dependencies. +fn fetch_struct_trait_impls( + context: &mut HirContext, + unresolved_traits_impls: &[UnresolvedTraitImpl], + trait_name: &str, +) -> Vec<String> { + let mut struct_typenames: Vec<String> = Vec::new(); + + // These structs can be declared in either external crates or the current one. External crates that contain + // dependencies have already been processed and resolved, but are available here via the NodeInterner. Note that + // crates on which the current crate does not depend may not have been processed, and will be ignored.
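These note type names feed `generate_compute_note_hash_and_nullifier_source` further down, which emits one `if` arm per note type and chains them with `else`. A standalone sketch of that dispatch shape, using hypothetical note type names and a local `dispatch_for` helper that is not part of the macro:

```rust
// Mirrors the arm/join logic of the generator: one arm per note type, falling
// through to a zero array for unknown note_type_ids (see TODO(#4520) below).
fn dispatch_for(note_types: &[&str]) -> String {
    let arms: Vec<String> = note_types
        .iter()
        .map(|ty| {
            format!(
                "if (note_type_id == {0}::get_note_type_id()) {{ note_utils::compute_note_hash_and_nullifier({0}::deserialize_content, note_header, serialized_note) }}",
                ty
            )
        })
        .collect();
    arms.join(" else ") + " else { [0, 0, 0, 0] }"
}

fn main() {
    // For a contract declaring two (hypothetical) note types, the generated body
    // dispatches on note_type_id and defaults to [0, 0, 0, 0].
    println!("{}", dispatch_for(&["CardNote", "ValueNote"]));
}
```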
+ for trait_impl_id in 0..context.def_interner.next_trait_impl_id().0 { + let trait_impl = &context.def_interner.get_trait_implementation(TraitImplId(trait_impl_id)); + + if trait_impl.borrow().ident.0.contents == *trait_name { + if let Type::Struct(s, _) = &trait_impl.borrow().typ { + struct_typenames.push(s.borrow().name.0.contents.clone()); + } else { + panic!("Found impl for {} on non-Struct", trait_name); + } + } + } + + // This crate's traits and impls have not yet been resolved, so we look for impls in unresolved_trait_impls. + struct_typenames.extend( + unresolved_traits_impls + .iter() + .filter(|trait_impl| { + trait_impl + .trait_path + .segments + .last() + .expect("ICE: empty trait_impl path") + .0 + .contents + == *trait_name + }) + .filter_map(|trait_impl| match &trait_impl.object_type.typ { + UnresolvedTypeData::Named(path, _, _) => { + Some(path.segments.last().unwrap().0.contents.clone()) + } + _ => None, + }), + ); + + struct_typenames +} + +fn generate_compute_note_hash_and_nullifier(note_types: &Vec) -> NoirFunction { + let function_source = generate_compute_note_hash_and_nullifier_source(note_types); + + let (function_ast, errors) = parse_program(&function_source); + if !errors.is_empty() { + dbg!(errors.clone()); + } + assert_eq!(errors.len(), 0, "Failed to parse Noir macro code. This is either a bug in the compiler or the Noir macro code"); + + let mut function_ast = function_ast.into_sorted(); + function_ast.functions.remove(0) +} + +fn generate_compute_note_hash_and_nullifier_source(note_types: &Vec) -> String { + // TODO(#4649): The serialized_note parameter is a fixed-size array, but we don't know what length it should have. + // For now we hardcode it to 20, which is the same as MAX_NOTE_FIELDS_LENGTH. + + if note_types.is_empty() { + // TODO(#4520): Even if the contract does not include any notes, other parts of the stack expect for this + // function to exist, so we include a dummy version. We likely should error out here instead. + " + unconstrained fn compute_note_hash_and_nullifier( + contract_address: AztecAddress, + nonce: Field, + storage_slot: Field, + note_type_id: Field, + serialized_note: [Field; 20] + ) -> pub [Field; 4] { + [0, 0, 0, 0] + }" + .to_string() + } else { + // For contracts that include notes we do a simple if-else chain comparing note_type_id with the different + // get_note_type_id of each of the note types. + + let if_statements: Vec = note_types.iter().map(|note_type| format!( + "if (note_type_id == {0}::get_note_type_id()) {{ + note_utils::compute_note_hash_and_nullifier({0}::deserialize_content, note_header, serialized_note) + }}" + , note_type)).collect(); + + // TODO(#4520): error out on the else instead of returning a zero array + let full_if_statement = if_statements.join(" else ") + + " + else { + [0, 0, 0, 0] + }"; + + format!( + " + unconstrained fn compute_note_hash_and_nullifier( + contract_address: AztecAddress, + nonce: Field, + storage_slot: Field, + note_type_id: Field, + serialized_note: [Field; 20] + ) -> pub [Field; 4] {{ + let note_header = NoteHeader::new(contract_address, nonce, storage_slot); + + {} + }}", + full_if_statement + ) + } +} diff --git a/bootstrap.sh b/bootstrap.sh index 5ebe7ade090..54129c3d61a 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -15,5 +15,8 @@ if [ -n "$CMD" ]; then fi fi +# Attempt to just pull artefacts from CI and exit on success. 
+[ -n "${USE_CACHE:-}" ] && ./bootstrap_cache.sh && exit + ./scripts/bootstrap_native.sh -./scripts/bootstrap_packages.sh \ No newline at end of file +./scripts/bootstrap_packages.sh diff --git a/bootstrap_cache.sh b/bootstrap_cache.sh index 672702416bd..1cec6c81d8e 100755 --- a/bootstrap_cache.sh +++ b/bootstrap_cache.sh @@ -9,3 +9,5 @@ extract_repo noir-packages /usr/src/noir/packages ./ echo -e "\033[1mRetrieving nargo from remote cache...\033[0m" extract_repo noir /usr/src/noir/target/release ./target/ +remove_old_images noir-packages +remove_old_images noir diff --git a/compiler/integration-tests/package.json b/compiler/integration-tests/package.json index c4e424df480..798b7c55312 100644 --- a/compiler/integration-tests/package.json +++ b/compiler/integration-tests/package.json @@ -5,8 +5,8 @@ "private": true, "scripts": { "build": "echo Integration Test build step", - "test": "bash ./scripts/codegen-verifiers.sh && yarn test:browser && yarn test:node", - "test:node": "hardhat test test/node/**/*", + "test": "yarn test:browser && yarn test:node", + "test:node": "bash ./scripts/codegen-verifiers.sh && hardhat test test/node/**/*", "test:browser": "web-test-runner", "test:integration:browser": "web-test-runner test/browser/**/*.test.ts", "test:integration:browser:watch": "web-test-runner test/browser/**/*.test.ts --watch", @@ -19,9 +19,10 @@ "@nomicfoundation/hardhat-chai-matchers": "^2.0.0", "@nomicfoundation/hardhat-ethers": "^3.0.0", "@web/dev-server-esbuild": "^0.3.6", + "@web/dev-server-import-maps": "^0.2.0", "@web/test-runner": "^0.15.3", "@web/test-runner-playwright": "^0.10.0", - "eslint": "^8.50.0", + "eslint": "^8.56.0", "eslint-plugin-prettier": "^5.0.0", "ethers": "^6.7.1", "hardhat": "^2.17.4", diff --git a/compiler/integration-tests/test/browser/compile_prove_verify.test.ts b/compiler/integration-tests/test/browser/compile_prove_verify.test.ts index 0a829def09e..dba51895bb8 100644 --- a/compiler/integration-tests/test/browser/compile_prove_verify.test.ts +++ b/compiler/integration-tests/test/browser/compile_prove_verify.test.ts @@ -59,11 +59,11 @@ test_cases.forEach((testInfo) => { // JS Proving - const proofWithPublicInputs = await program.generateFinalProof(inputs); + const proofWithPublicInputs = await program.generateProof(inputs); // JS verification - const verified = await program.verifyFinalProof(proofWithPublicInputs); + const verified = await program.verifyProof(proofWithPublicInputs); expect(verified, 'Proof fails verification in JS').to.be.true; }); diff --git a/compiler/integration-tests/test/browser/recursion.test.ts b/compiler/integration-tests/test/browser/recursion.test.ts index 80199de5701..a8927aa6a75 100644 --- a/compiler/integration-tests/test/browser/recursion.test.ts +++ b/compiler/integration-tests/test/browser/recursion.test.ts @@ -19,7 +19,7 @@ await newABICoder(); await initACVM(); const base_relative_path = '../../../../..'; -const circuit_main = 'test_programs/execution_success/assert_statement'; +const circuit_main = 'test_programs/execution_success/assert_statement_recursive'; const circuit_recursion = 'compiler/integration-tests/circuits/recursion'; async function getCircuit(projectPath: string) { @@ -48,15 +48,15 @@ describe('It compiles noir program code, receiving circuit bytes and abi object. 
const { witness: main_witnessUint8Array } = await new Noir(main_program).execute(main_inputs); - const main_proof = await main_backend.generateIntermediateProof(main_witnessUint8Array); - const main_verification = await main_backend.verifyIntermediateProof(main_proof); + const main_proof = await main_backend.generateProof(main_witnessUint8Array); + const main_verification = await main_backend.verifyProof(main_proof); logger.debug('main_verification', main_verification); expect(main_verification).to.be.true; const numPublicInputs = 1; - const { proofAsFields, vkAsFields, vkHash } = await main_backend.generateIntermediateProofArtifacts( + const { proofAsFields, vkAsFields, vkHash } = await main_backend.generateRecursiveProofArtifacts( main_proof, numPublicInputs, ); @@ -76,20 +76,20 @@ describe('It compiles noir program code, receiving circuit bytes and abi object. const { witness: recursion_witnessUint8Array } = await new Noir(recursion_program).execute(recursion_inputs); - const recursion_proof = await recursion_backend.generateFinalProof(recursion_witnessUint8Array); + const recursion_proof = await recursion_backend.generateProof(recursion_witnessUint8Array); // Causes an "unreachable" error. // Due to the fact that it's a non-recursive proof? // // const recursion_numPublicInputs = 1; - // const { proofAsFields: recursion_proofAsFields } = await recursion_backend.generateIntermediateProofArtifacts( + // const { proofAsFields: recursion_proofAsFields } = await recursion_backend.generateRecursiveProofArtifacts( // recursion_proof, // recursion_numPublicInputs, // ); // // logger.debug('recursion_proofAsFields', recursion_proofAsFields); - const recursion_verification = await recursion_backend.verifyFinalProof(recursion_proof); + const recursion_verification = await recursion_backend.verifyProof(recursion_proof); logger.debug('recursion_verification', recursion_verification); diff --git a/compiler/integration-tests/test/mocks/os.js b/compiler/integration-tests/test/mocks/os.js new file mode 100644 index 00000000000..32333568316 --- /dev/null +++ b/compiler/integration-tests/test/mocks/os.js @@ -0,0 +1 @@ +export const os = { cpus: () => new Array(4) }; diff --git a/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts b/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts index 9cdd80edc15..6147f770f16 100644 --- a/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts +++ b/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts @@ -16,7 +16,7 @@ it(`smart contract can verify a recursive proof`, async () => { const fm = createFileManager(basePath); const innerCompilationResult = await compile( fm, - join(basePath, './test_programs/execution_success/assert_statement'), + join(basePath, './test_programs/execution_success/assert_statement_recursive'), ); if (!('program' in innerCompilationResult)) { throw new Error('Compilation failed'); @@ -38,17 +38,17 @@ it(`smart contract can verify a recursive proof`, async () => { const inner = new Noir(innerProgram); const inner_prover_toml = readFileSync( - join(basePath, `./test_programs/execution_success/assert_statement/Prover.toml`), + join(basePath, `./test_programs/execution_success/assert_statement_recursive/Prover.toml`), ).toString(); const inner_inputs = toml.parse(inner_prover_toml); const { witness: main_witness } = await inner.execute(inner_inputs); - const intermediate_proof = await inner_backend.generateIntermediateProof(main_witness); + const 
intermediate_proof = await inner_backend.generateProof(main_witness); - expect(await inner_backend.verifyIntermediateProof(intermediate_proof)).to.be.true; + expect(await inner_backend.verifyProof(intermediate_proof)).to.be.true; - const { proofAsFields, vkAsFields, vkHash } = await inner_backend.generateIntermediateProofArtifacts( + const { proofAsFields, vkAsFields, vkHash } = await inner_backend.generateRecursiveProofArtifacts( intermediate_proof, 1, // 1 public input ); @@ -65,8 +65,8 @@ it(`smart contract can verify a recursive proof`, async () => { key_hash: vkHash, }; - const recursion_proof = await recursion.generateFinalProof(recursion_inputs); - expect(await recursion.verifyFinalProof(recursion_proof)).to.be.true; + const recursion_proof = await recursion.generateProof(recursion_inputs); + expect(await recursion.verifyProof(recursion_proof)).to.be.true; // Smart contract verification diff --git a/compiler/integration-tests/test/node/smart_contract_verifier.test.ts b/compiler/integration-tests/test/node/smart_contract_verifier.test.ts index d870956ea7a..79a0520da32 100644 --- a/compiler/integration-tests/test/node/smart_contract_verifier.test.ts +++ b/compiler/integration-tests/test/node/smart_contract_verifier.test.ts @@ -46,11 +46,11 @@ test_cases.forEach((testInfo) => { const prover_toml = readFileSync(resolve(`${base_relative_path}/${test_case}/Prover.toml`)).toString(); const inputs = toml.parse(prover_toml); - const proofData = await program.generateFinalProof(inputs); + const proofData = await program.generateProof(inputs); // JS verification - const verified = await program.verifyFinalProof(proofData); + const verified = await program.verifyProof(proofData); expect(verified, 'Proof fails verification in JS').to.be.true; // Smart contract verification diff --git a/compiler/integration-tests/web-test-runner.config.mjs b/compiler/integration-tests/web-test-runner.config.mjs index 665ea262f99..4dfc96dd0a6 100644 --- a/compiler/integration-tests/web-test-runner.config.mjs +++ b/compiler/integration-tests/web-test-runner.config.mjs @@ -2,7 +2,8 @@ import { defaultReporter } from '@web/test-runner'; import { summaryReporter } from '@web/test-runner'; import { fileURLToPath } from 'url'; import { esbuildPlugin } from '@web/dev-server-esbuild'; -import { playwrightLauncher } from "@web/test-runner-playwright"; +import { playwrightLauncher } from '@web/test-runner-playwright'; +import { importMapsPlugin } from '@web/dev-server-import-maps'; let reporter = summaryReporter(); const debugPlugins = []; @@ -21,7 +22,7 @@ if (process.env.CI !== 'true' || process.env.RUNNER_DEBUG === '1') { export default { browsers: [ - playwrightLauncher({ product: "chromium" }), + playwrightLauncher({ product: 'chromium' }), // playwrightLauncher({ product: "webkit" }), // playwrightLauncher({ product: "firefox" }), ], @@ -29,6 +30,16 @@ export default { esbuildPlugin({ ts: true, }), + importMapsPlugin({ + inject: { + importMap: { + imports: { + // mock os module + os: '/test/mocks/os.js', + }, + }, + }, + }), ...debugPlugins, ], files: ['test/browser/**/*.test.ts'], diff --git a/compiler/noirc_driver/Cargo.toml b/compiler/noirc_driver/Cargo.toml index eb9650e8aec..681976735f3 100644 --- a/compiler/noirc_driver/Cargo.toml +++ b/compiler/noirc_driver/Cargo.toml @@ -23,5 +23,7 @@ serde.workspace = true fxhash.workspace = true rust-embed.workspace = true tracing.workspace = true +thiserror.workspace = true aztec_macros = { path = "../../aztec_macros" } +noirc_macros = { path = "../../noirc_macros" } diff 
--git a/compiler/noirc_driver/src/lib.rs b/compiler/noirc_driver/src/lib.rs index 6fd69f8b576..11f53cdb749 100644 --- a/compiler/noirc_driver/src/lib.rs +++ b/compiler/noirc_driver/src/lib.rs @@ -3,7 +3,7 @@ #![warn(unreachable_pub)] #![warn(clippy::semicolon_if_nothing_returned)] -use acvm::ExpressionWidth; +use acvm::acir::circuit::ExpressionWidth; use clap::Args; use fm::{FileId, FileManager}; use iter_extended::vecmap; @@ -11,13 +11,15 @@ use noirc_abi::{AbiParameter, AbiType, ContractEvent}; use noirc_errors::{CustomDiagnostic, FileDiagnostic}; use noirc_evaluator::create_circuit; use noirc_evaluator::errors::RuntimeError; +use noirc_frontend::debug::build_debug_crate_file; use noirc_frontend::graph::{CrateId, CrateName}; use noirc_frontend::hir::def_map::{Contract, CrateDefMap}; use noirc_frontend::hir::Context; use noirc_frontend::macros_api::MacroProcessor; -use noirc_frontend::monomorphization::monomorphize; +use noirc_frontend::monomorphization::{monomorphize, monomorphize_debug, MonomorphizationError}; use noirc_frontend::node_interner::FuncId; use std::path::Path; +use thiserror::Error; use tracing::info; mod abi_gen; @@ -33,6 +35,7 @@ pub use debug::DebugFile; pub use program::CompiledProgram; const STD_CRATE_NAME: &str = "std"; +const DEBUG_CRATE_NAME: &str = "__debug"; pub const GIT_COMMIT: &str = env!("GIT_COMMIT"); pub const GIT_DIRTY: &str = env!("GIT_DIRTY"); @@ -76,23 +79,51 @@ pub struct CompileOptions { #[arg(long, hide = true)] pub only_acir: bool, - /// Disables the builtin macros being used in the compiler + /// Disables the builtin Aztec macros being used in the compiler #[arg(long, hide = true)] pub disable_macros: bool, /// Outputs the monomorphized IR to stdout for debugging #[arg(long, hide = true)] pub show_monomorphized: bool, + + /// Insert debug symbols to inspect variables + #[arg(long, hide = true)] + pub instrument_debug: bool, + + /// Force Brillig output (for step debugging) + #[arg(long, hide = true)] + pub force_brillig: bool, } fn parse_expression_width(input: &str) -> Result { use std::io::{Error, ErrorKind}; - let width = input .parse::() .map_err(|err| Error::new(ErrorKind::InvalidInput, err.to_string()))?; - Ok(ExpressionWidth::from(width)) + match width { + 0 => Ok(ExpressionWidth::Unbounded), + _ => Ok(ExpressionWidth::Bounded { width }), + } +} + +#[derive(Debug, Error)] +pub enum CompileError { + #[error(transparent)] + MonomorphizationError(#[from] MonomorphizationError), + + #[error(transparent)] + RuntimeError(#[from] RuntimeError), +} + +impl From for FileDiagnostic { + fn from(error: CompileError) -> FileDiagnostic { + match error { + CompileError::RuntimeError(err) => err.into(), + CompileError::MonomorphizationError(err) => err.into(), + } + } } /// Helper type used to signify where only warnings are expected in file diagnostics @@ -113,6 +144,7 @@ pub fn file_manager_with_stdlib(root: &Path) -> FileManager { let mut file_manager = FileManager::new(root); add_stdlib_source_to_file_manager(&mut file_manager); + add_debug_source_to_file_manager(&mut file_manager); file_manager } @@ -129,6 +161,15 @@ fn add_stdlib_source_to_file_manager(file_manager: &mut FileManager) { } } +/// Adds the source code of the debug crate needed to support instrumentation to +/// track variables values +fn add_debug_source_to_file_manager(file_manager: &mut FileManager) { + // Adds the synthetic debug module for instrumentation into the file manager + let path_to_debug_lib_file = Path::new(DEBUG_CRATE_NAME).join("lib.nr"); + file_manager + 
.add_file_with_source_canonical_path(&path_to_debug_lib_file, build_debug_crate_file()); +} + /// Adds the file from the file system at `Path` to the crate graph as a root file /// /// Note: This methods adds the stdlib as a dependency to the crate. @@ -150,6 +191,12 @@ pub fn prepare_crate(context: &mut Context, file_name: &Path) -> CrateId { root_crate_id } +pub fn link_to_debug_crate(context: &mut Context, root_crate_id: CrateId) { + let path_to_debug_lib_file = Path::new(DEBUG_CRATE_NAME).join("lib.nr"); + let debug_crate_id = prepare_dependency(context, &path_to_debug_lib_file); + add_dep(context, root_crate_id, debug_crate_id, DEBUG_CRATE_NAME.parse().unwrap()); +} + // Adds the file from the file system at `Path` to the crate graph pub fn prepare_dependency(context: &mut Context, file_name: &Path) -> CrateId { let root_file_id = context @@ -191,9 +238,12 @@ pub fn check_crate( disable_macros: bool, ) -> CompilationResult<()> { let macros: Vec<&dyn MacroProcessor> = if disable_macros { - vec![] + vec![&noirc_macros::AssertMessageMacro as &dyn MacroProcessor] } else { - vec![&aztec_macros::AztecMacro as &dyn MacroProcessor] + vec![ + &aztec_macros::AztecMacro as &dyn MacroProcessor, + &noirc_macros::AssertMessageMacro as &dyn MacroProcessor, + ] }; let mut errors = vec![]; @@ -244,6 +294,7 @@ pub fn compile_main( let compiled_program = compile_no_check(context, options, main, cached_program, options.force_compile) .map_err(FileDiagnostic::from)?; + let compilation_warnings = vecmap(compiled_program.warnings.clone(), FileDiagnostic::from); if options.deny_warnings && !compilation_warnings.is_empty() { return Err(compilation_warnings); @@ -323,7 +374,7 @@ fn has_errors(errors: &[FileDiagnostic], deny_warnings: bool) -> bool { /// Compile all of the functions associated with a Noir contract. fn compile_contract_inner( - context: &Context, + context: &mut Context, contract: Contract, options: &CompileOptions, ) -> Result { @@ -399,13 +450,17 @@ fn compile_contract_inner( /// This function assumes [`check_crate`] is called beforehand. #[tracing::instrument(level = "trace", skip_all, fields(function_name = context.function_name(&main_function)))] pub fn compile_no_check( - context: &Context, + context: &mut Context, options: &CompileOptions, main_function: FuncId, cached_program: Option, force_compile: bool, -) -> Result { - let program = monomorphize(main_function, &context.def_interner); +) -> Result { + let program = if options.instrument_debug { + monomorphize_debug(main_function, &mut context.def_interner, &context.debug_instrumenter)? + } else { + monomorphize(main_function, &mut context.def_interner)? 
+ }; let hash = fxhash::hash64(&program); let hashes_match = cached_program.as_ref().map_or(false, |program| program.hash == hash); @@ -424,7 +479,7 @@ pub fn compile_no_check( } let visibility = program.return_visibility; let (circuit, debug, input_witnesses, return_witnesses, warnings) = - create_circuit(program, options.show_ssa, options.show_brillig)?; + create_circuit(program, options.show_ssa, options.show_brillig, options.force_brillig)?; let abi = abi_gen::gen_abi(context, &main_function, input_witnesses, return_witnesses, visibility); diff --git a/compiler/noirc_errors/Cargo.toml b/compiler/noirc_errors/Cargo.toml index 935137ba2fc..da18399971e 100644 --- a/compiler/noirc_errors/Cargo.toml +++ b/compiler/noirc_errors/Cargo.toml @@ -13,6 +13,7 @@ codespan-reporting.workspace = true codespan.workspace = true fm.workspace = true chumsky.workspace = true +noirc_printable_type.workspace = true serde.workspace = true serde_with = "3.2.0" tracing.workspace = true diff --git a/compiler/noirc_errors/src/debug_info.rs b/compiler/noirc_errors/src/debug_info.rs index ffca8fbf2e1..67ec851d46d 100644 --- a/compiler/noirc_errors/src/debug_info.rs +++ b/compiler/noirc_errors/src/debug_info.rs @@ -16,10 +16,26 @@ use std::io::Write; use std::mem; use crate::Location; +use noirc_printable_type::PrintableType; use serde::{ de::Error as DeserializationError, ser::Error as SerializationError, Deserialize, Serialize, }; +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, PartialOrd, Ord, Deserialize, Serialize)] +pub struct DebugVarId(pub u32); + +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, PartialOrd, Ord, Deserialize, Serialize)] +pub struct DebugTypeId(pub u32); + +#[derive(Debug, Clone, Hash, Deserialize, Serialize)] +pub struct DebugVariable { + pub name: String, + pub debug_type_id: DebugTypeId, +} + +pub type DebugVariables = BTreeMap; +pub type DebugTypes = BTreeMap; + #[serde_as] #[derive(Default, Debug, Clone, Deserialize, Serialize)] pub struct DebugInfo { @@ -28,6 +44,8 @@ pub struct DebugInfo { /// that they should be serialized to/from strings. #[serde_as(as = "BTreeMap")] pub locations: BTreeMap>, + pub variables: DebugVariables, + pub types: DebugTypes, } /// Holds OpCodes Counts for Acir and Brillig Opcodes @@ -39,8 +57,12 @@ pub struct OpCodesCount { } impl DebugInfo { - pub fn new(locations: BTreeMap>) -> Self { - DebugInfo { locations } + pub fn new( + locations: BTreeMap>, + variables: DebugVariables, + types: DebugTypes, + ) -> Self { + Self { locations, variables, types } } /// Updates the locations map when the [`Circuit`][acvm::acir::circuit::Circuit] is modified. 
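With these additions a `DebugInfo` carries, alongside its opcode locations, the set of source variables and the printable type of each one. A minimal construction sketch, assuming `DebugVariables` and `DebugTypes` are the maps keyed by `DebugVarId` and `DebugTypeId` introduced in this hunk (the function name is illustrative):

```rust
use std::collections::BTreeMap;

use noirc_errors::debug_info::{
    DebugInfo, DebugTypeId, DebugTypes, DebugVarId, DebugVariable, DebugVariables,
};
use noirc_printable_type::PrintableType;

// Build a DebugInfo that tracks a single variable `x` of printable type Field,
// with no opcode locations recorded yet.
fn debug_info_with_one_variable() -> DebugInfo {
    let mut variables: DebugVariables = BTreeMap::new();
    variables.insert(
        DebugVarId(0),
        DebugVariable { name: "x".to_string(), debug_type_id: DebugTypeId(0) },
    );

    let mut types: DebugTypes = BTreeMap::new();
    types.insert(DebugTypeId(0), PrintableType::Field);

    DebugInfo::new(BTreeMap::new(), variables, types)
}
```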
@@ -68,7 +90,7 @@ impl DebugInfo { for (opcode_location, locations) in self.locations.iter() { for location in locations.iter() { - let opcodes = accumulator.entry(*location).or_insert(Vec::new()); + let opcodes = accumulator.entry(*location).or_default(); opcodes.push(opcode_location); } } diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs index 96d80cb8131..d542240a40c 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs @@ -56,12 +56,12 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::Keccak256 => { if let ( - [message, BrilligVariable::Simple(array_size)], + [message, BrilligVariable::SingleAddr(array_size)], [BrilligVariable::BrilligArray(result_array)], ) = (function_arguments, function_results) { let mut message_vector = convert_array_or_vector(brillig_context, message, bb_func); - message_vector.size = *array_size; + message_vector.size = array_size.address; brillig_context.black_box_op_instruction(BlackBoxOp::Keccak256 { message: message_vector.to_heap_vector(), @@ -88,7 +88,7 @@ pub(crate) fn convert_black_box_call( BlackBoxFunc::EcdsaSecp256k1 => { if let ( [BrilligVariable::BrilligArray(public_key_x), BrilligVariable::BrilligArray(public_key_y), BrilligVariable::BrilligArray(signature), message], - [BrilligVariable::Simple(result_register)], + [BrilligVariable::SingleAddr(result_register)], ) = (function_arguments, function_results) { let message_hash_vector = @@ -98,7 +98,7 @@ pub(crate) fn convert_black_box_call( public_key_x: public_key_x.to_heap_array(), public_key_y: public_key_y.to_heap_array(), signature: signature.to_heap_array(), - result: *result_register, + result: result_register.address, }); } else { unreachable!( @@ -109,7 +109,7 @@ pub(crate) fn convert_black_box_call( BlackBoxFunc::EcdsaSecp256r1 => { if let ( [BrilligVariable::BrilligArray(public_key_x), BrilligVariable::BrilligArray(public_key_y), BrilligVariable::BrilligArray(signature), message], - [BrilligVariable::Simple(result_register)], + [BrilligVariable::SingleAddr(result_register)], ) = (function_arguments, function_results) { let message_hash_vector = @@ -119,7 +119,7 @@ pub(crate) fn convert_black_box_call( public_key_x: public_key_x.to_heap_array(), public_key_y: public_key_y.to_heap_array(), signature: signature.to_heap_array(), - result: *result_register, + result: result_register.address, }); } else { unreachable!( @@ -130,14 +130,14 @@ pub(crate) fn convert_black_box_call( BlackBoxFunc::PedersenCommitment => { if let ( - [message, BrilligVariable::Simple(domain_separator)], + [message, BrilligVariable::SingleAddr(domain_separator)], [BrilligVariable::BrilligArray(result_array)], ) = (function_arguments, function_results) { let message_vector = convert_array_or_vector(brillig_context, message, bb_func); brillig_context.black_box_op_instruction(BlackBoxOp::PedersenCommitment { inputs: message_vector.to_heap_vector(), - domain_separator: *domain_separator, + domain_separator: domain_separator.address, output: result_array.to_heap_array(), }); } else { @@ -146,15 +146,15 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::PedersenHash => { if let ( - [message, BrilligVariable::Simple(domain_separator)], - [BrilligVariable::Simple(result)], + [message, BrilligVariable::SingleAddr(domain_separator)], + [BrilligVariable::SingleAddr(result)], ) = (function_arguments, 
function_results) { let message_vector = convert_array_or_vector(brillig_context, message, bb_func); brillig_context.black_box_op_instruction(BlackBoxOp::PedersenHash { inputs: message_vector.to_heap_vector(), - domain_separator: *domain_separator, - output: *result, + domain_separator: domain_separator.address, + output: result.address, }); } else { unreachable!("ICE: Pedersen hash expects one array argument, a register for the domain separator, and one register result") @@ -162,18 +162,18 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::SchnorrVerify => { if let ( - [BrilligVariable::Simple(public_key_x), BrilligVariable::Simple(public_key_y), BrilligVariable::BrilligArray(signature), message], - [BrilligVariable::Simple(result_register)], + [BrilligVariable::SingleAddr(public_key_x), BrilligVariable::SingleAddr(public_key_y), BrilligVariable::BrilligArray(signature), message], + [BrilligVariable::SingleAddr(result_register)], ) = (function_arguments, function_results) { let message_hash = convert_array_or_vector(brillig_context, message, bb_func); let signature = brillig_context.array_to_vector(signature); brillig_context.black_box_op_instruction(BlackBoxOp::SchnorrVerify { - public_key_x: *public_key_x, - public_key_y: *public_key_y, + public_key_x: public_key_x.address, + public_key_y: public_key_y.address, message: message_hash.to_heap_vector(), signature: signature.to_heap_vector(), - result: *result_register, + result: result_register.address, }); } else { unreachable!("ICE: Schnorr verify expects two registers for the public key, an array for signature, an array for the message hash and one result register") @@ -181,13 +181,13 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::FixedBaseScalarMul => { if let ( - [BrilligVariable::Simple(low), BrilligVariable::Simple(high)], + [BrilligVariable::SingleAddr(low), BrilligVariable::SingleAddr(high)], [BrilligVariable::BrilligArray(result_array)], ) = (function_arguments, function_results) { brillig_context.black_box_op_instruction(BlackBoxOp::FixedBaseScalarMul { - low: *low, - high: *high, + low: low.address, + high: high.address, result: result_array.to_heap_array(), }); } else { @@ -198,15 +198,15 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::EmbeddedCurveAdd => { if let ( - [BrilligVariable::Simple(input1_x), BrilligVariable::Simple(input1_y), BrilligVariable::Simple(input2_x), BrilligVariable::Simple(input2_y)], + [BrilligVariable::SingleAddr(input1_x), BrilligVariable::SingleAddr(input1_y), BrilligVariable::SingleAddr(input2_x), BrilligVariable::SingleAddr(input2_y)], [BrilligVariable::BrilligArray(result_array)], ) = (function_arguments, function_results) { brillig_context.black_box_op_instruction(BlackBoxOp::EmbeddedCurveAdd { - input1_x: *input1_x, - input1_y: *input1_y, - input2_x: *input2_x, - input2_y: *input2_y, + input1_x: input1_x.address, + input1_y: input1_y.address, + input2_x: input2_x.address, + input2_y: input2_y.address, result: result_array.to_heap_array(), }); } else { @@ -229,14 +229,14 @@ pub(crate) fn convert_black_box_call( ), BlackBoxFunc::BigIntAdd => { if let ( - [BrilligVariable::Simple(lhs), BrilligVariable::Simple(rhs)], - [BrilligVariable::Simple(output)], + [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(rhs)], + [BrilligVariable::SingleAddr(output)], ) = (function_arguments, function_results) { brillig_context.black_box_op_instruction(BlackBoxOp::BigIntAdd { - lhs: *lhs, - rhs: *rhs, - output: *output, + lhs: lhs.address, + rhs: rhs.address, + output: 
output.address, }); } else { unreachable!( @@ -244,16 +244,16 @@ pub(crate) fn convert_black_box_call( ) } } - BlackBoxFunc::BigIntNeg => { + BlackBoxFunc::BigIntSub => { if let ( - [BrilligVariable::Simple(lhs), BrilligVariable::Simple(rhs)], - [BrilligVariable::Simple(output)], + [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(rhs)], + [BrilligVariable::SingleAddr(output)], ) = (function_arguments, function_results) { - brillig_context.black_box_op_instruction(BlackBoxOp::BigIntNeg { - lhs: *lhs, - rhs: *rhs, - output: *output, + brillig_context.black_box_op_instruction(BlackBoxOp::BigIntSub { + lhs: lhs.address, + rhs: rhs.address, + output: output.address, }); } else { unreachable!( @@ -263,14 +263,14 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::BigIntMul => { if let ( - [BrilligVariable::Simple(lhs), BrilligVariable::Simple(rhs)], - [BrilligVariable::Simple(output)], + [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(rhs)], + [BrilligVariable::SingleAddr(output)], ) = (function_arguments, function_results) { brillig_context.black_box_op_instruction(BlackBoxOp::BigIntMul { - lhs: *lhs, - rhs: *rhs, - output: *output, + lhs: lhs.address, + rhs: rhs.address, + output: output.address, }); } else { unreachable!( @@ -280,14 +280,14 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::BigIntDiv => { if let ( - [BrilligVariable::Simple(lhs), BrilligVariable::Simple(rhs)], - [BrilligVariable::Simple(output)], + [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(rhs)], + [BrilligVariable::SingleAddr(output)], ) = (function_arguments, function_results) { brillig_context.black_box_op_instruction(BlackBoxOp::BigIntDiv { - lhs: *lhs, - rhs: *rhs, - output: *output, + lhs: lhs.address, + rhs: rhs.address, + output: output.address, }); } else { unreachable!( @@ -296,7 +296,7 @@ pub(crate) fn convert_black_box_call( } } BlackBoxFunc::BigIntFromLeBytes => { - if let ([inputs, modulus], [BrilligVariable::Simple(output)]) = + if let ([inputs, modulus], [BrilligVariable::SingleAddr(output)]) = (function_arguments, function_results) { let inputs_vector = convert_array_or_vector(brillig_context, inputs, bb_func); @@ -304,7 +304,7 @@ pub(crate) fn convert_black_box_call( brillig_context.black_box_op_instruction(BlackBoxOp::BigIntFromLeBytes { inputs: inputs_vector.to_heap_vector(), modulus: modulus_vector.to_heap_vector(), - output: *output, + output: output.address, }); } else { unreachable!( @@ -314,12 +314,12 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::BigIntToLeBytes => { if let ( - [BrilligVariable::Simple(input)], + [BrilligVariable::SingleAddr(input)], [BrilligVariable::BrilligVector(result_vector)], ) = (function_arguments, function_results) { brillig_context.black_box_op_instruction(BlackBoxOp::BigIntToLeBytes { - input: *input, + input: input.address, output: result_vector.to_heap_vector(), }); } else { @@ -330,7 +330,7 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::Poseidon2Permutation => { if let ( - [message, BrilligVariable::Simple(state_len)], + [message, BrilligVariable::SingleAddr(state_len)], [BrilligVariable::BrilligArray(result_array)], ) = (function_arguments, function_results) { @@ -338,7 +338,7 @@ pub(crate) fn convert_black_box_call( brillig_context.black_box_op_instruction(BlackBoxOp::Poseidon2Permutation { message: message_vector.to_heap_vector(), output: result_array.to_heap_array(), - len: *state_len, + len: state_len.address, }); } else { unreachable!("ICE: Poseidon2Permutation expects one array 
argument, a length and one array result") diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs index b084042981b..c04d8475f08 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs @@ -1,8 +1,9 @@ -use crate::brillig::brillig_ir::brillig_variable::{BrilligArray, BrilligVariable, BrilligVector}; -use crate::brillig::brillig_ir::{ - BrilligBinaryOp, BrilligContext, BRILLIG_INTEGER_ARITHMETIC_BIT_SIZE, +use crate::brillig::brillig_ir::brillig_variable::{ + type_to_heap_value_type, BrilligArray, BrilligVariable, BrilligVector, SingleAddrVariable, }; +use crate::brillig::brillig_ir::{BrilligBinaryOp, BrilligContext}; use crate::ssa::ir::dfg::CallStack; +use crate::ssa::ir::instruction::ConstrainError; use crate::ssa::ir::{ basic_block::{BasicBlock, BasicBlockId}, dfg::DataFlowGraph, @@ -13,7 +14,7 @@ use crate::ssa::ir::{ types::{NumericType, Type}, value::{Value, ValueId}, }; -use acvm::acir::brillig::{BinaryFieldOp, BinaryIntOp, RegisterIndex, RegisterOrMemory}; +use acvm::acir::brillig::{BinaryFieldOp, BinaryIntOp, MemoryAddress, ValueOrArray}; use acvm::brillig_vm::brillig::HeapVector; use acvm::FieldElement; use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet}; @@ -22,7 +23,7 @@ use num_bigint::BigUint; use super::brillig_black_box::convert_black_box_call; use super::brillig_block_variables::BlockVariables; -use super::brillig_fn::FunctionContext; +use super::brillig_fn::{get_bit_size_from_ssa_type, FunctionContext}; /// Generate the compilation artifacts for compiling a function into brillig bytecode. pub(crate) struct BrilligBlock<'block> { @@ -85,16 +86,6 @@ impl<'block> BrilligBlock<'block> { self.convert_ssa_terminator(terminator_instruction, dfg); } - fn get_bit_size_from_ssa_type(typ: Type) -> u32 { - match typ { - Type::Numeric(num_type) => match num_type { - NumericType::Signed { bit_size } | NumericType::Unsigned { bit_size } => bit_size, - NumericType::NativeField => FieldElement::max_num_bits(), - }, - _ => unreachable!("ICE bitwise not on a non numeric type"), - } - } - /// Creates a unique global label for a block. 
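The pervasive change in this file is that a brillig single-value variable now carries its bit width: `BrilligVariable::Simple(RegisterIndex)` becomes `BrilligVariable::SingleAddr(SingleAddrVariable)`, pairing a `MemoryAddress` with a `bit_size`. Knowing the width at each use site is what allows, for example, the reworked `RangeCheck` lowering further down to skip checks that always pass. A standalone sketch of that idea with stand-in types (not the real `brillig_ir` definitions):

```rust
use num_bigint::BigUint;

// Stand-in for brillig_ir's SingleAddrVariable: a memory address plus the bit width
// of the value stored at it.
struct SingleAddrVariable {
    address: usize,
    bit_size: u32,
}

// A range check against max_bit_size only needs code when the value's known width
// can exceed it; otherwise the check is statically true and can be skipped.
fn range_check_needed(value: &SingleAddrVariable, max_bit_size: u32) -> bool {
    value.bit_size > max_bit_size
}

// When a check is emitted, the value (cast to a field) is compared against the
// largest max_bit_size-bit value, 2^max_bit_size - 1.
fn range_check_upper_bound(max_bit_size: u32) -> BigUint {
    BigUint::from(2u32).pow(max_bit_size) - BigUint::from(1u32)
}

fn main() {
    let boolean = SingleAddrVariable { address: 0, bit_size: 1 };
    assert_eq!(boolean.address, 0);
    assert!(!range_check_needed(&boolean, 8)); // a 1-bit value always fits in 8 bits
    assert_eq!(range_check_upper_bound(8), BigUint::from(255u32));
}
```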
/// /// This uses the current functions's function ID and the block ID @@ -124,9 +115,9 @@ impl<'block> BrilligBlock<'block> { ) { match terminator_instruction { TerminatorInstruction::JmpIf { condition, then_destination, else_destination } => { - let condition = self.convert_ssa_register_value(*condition, dfg); + let condition = self.convert_ssa_single_addr_value(*condition, dfg); self.brillig_context.jump_if_instruction( - condition, + condition.address, self.create_block_label_for_current_function(*then_destination), ); self.brillig_context.jump_instruction( @@ -171,10 +162,10 @@ impl<'block> BrilligBlock<'block> { fn pass_variable(&mut self, source: BrilligVariable, destination: BrilligVariable) { match (source, destination) { ( - BrilligVariable::Simple(source_register), - BrilligVariable::Simple(destination_register), + BrilligVariable::SingleAddr(source_var), + BrilligVariable::SingleAddr(destination_var), ) => { - self.brillig_context.mov_instruction(destination_register, source_register); + self.brillig_context.mov_instruction(destination_var.address, source_var.address); } ( BrilligVariable::BrilligArray(BrilligArray { @@ -248,16 +239,19 @@ impl<'block> BrilligBlock<'block> { match instruction { Instruction::Binary(binary) => { - let result_register = self.variables.define_register_variable( + let result_var = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, dfg.instruction_results(instruction_id)[0], dfg, ); - self.convert_ssa_binary(binary, dfg, result_register); + self.convert_ssa_binary(binary, dfg, result_var); } Instruction::Constrain(lhs, rhs, assert_message) => { - let condition = self.brillig_context.allocate_register(); + let condition = SingleAddrVariable { + address: self.brillig_context.allocate_register(), + bit_size: 1, + }; self.convert_ssa_binary( &Binary { lhs: *lhs, rhs: *rhs, operator: BinaryOp::Eq }, @@ -265,12 +259,35 @@ impl<'block> BrilligBlock<'block> { condition, ); - self.brillig_context.constrain_instruction(condition, assert_message.clone()); - self.brillig_context.deallocate_register(condition); + let assert_message = if let Some(error) = assert_message { + match error.as_ref() { + ConstrainError::Static(string) => Some(string.clone()), + ConstrainError::Dynamic(call_instruction) => { + let Instruction::Call { func, arguments } = call_instruction else { + unreachable!("expected a call instruction") + }; + + let Value::Function(func_id) = &dfg[*func] else { + unreachable!("expected a function value") + }; + + self.convert_ssa_function_call(*func_id, arguments, dfg, &[]); + + // Dynamic assert messages are handled in the generated function call. + // We then don't need to attach one to the constrain instruction. + None + } + } + } else { + None + }; + + self.brillig_context.constrain_instruction(condition.address, assert_message); + self.brillig_context.deallocate_register(condition.address); } Instruction::Allocate => { let result_value = dfg.instruction_results(instruction_id)[0]; - let address_register = self.variables.define_register_variable( + let address_register = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, result_value, @@ -280,15 +297,16 @@ impl<'block> BrilligBlock<'block> { Type::Reference(element) => match *element { Type::Array(..) => { self.brillig_context - .allocate_array_reference_instruction(address_register); + .allocate_array_reference_instruction(address_register.address); } Type::Slice(..) 
=> { self.brillig_context - .allocate_vector_reference_instruction(address_register); + .allocate_vector_reference_instruction(address_register.address); } _ => { - self.brillig_context - .allocate_simple_reference_instruction(address_register); + self.brillig_context.allocate_single_addr_reference_instruction( + address_register.address, + ); } }, _ => { @@ -297,10 +315,11 @@ impl<'block> BrilligBlock<'block> { } } Instruction::Store { address, value } => { - let address_register = self.convert_ssa_register_value(*address, dfg); + let address_var = self.convert_ssa_single_addr_value(*address, dfg); let source_variable = self.convert_ssa_value(*value, dfg); - self.brillig_context.store_variable_instruction(address_register, source_variable); + self.brillig_context + .store_variable_instruction(address_var.address, source_variable); } Instruction::Load { address } => { let target_variable = self.variables.define_variable( @@ -310,48 +329,55 @@ impl<'block> BrilligBlock<'block> { dfg, ); - let address_register = self.convert_ssa_register_value(*address, dfg); + let address_variable = self.convert_ssa_single_addr_value(*address, dfg); - self.brillig_context.load_variable_instruction(target_variable, address_register); + self.brillig_context + .load_variable_instruction(target_variable, address_variable.address); } Instruction::Not(value) => { - let condition_register = self.convert_ssa_register_value(*value, dfg); - let result_register = self.variables.define_register_variable( + let condition_register = self.convert_ssa_single_addr_value(*value, dfg); + let result_register = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, dfg.instruction_results(instruction_id)[0], dfg, ); - let bit_size = Self::get_bit_size_from_ssa_type(dfg.type_of_value(*value)); - self.brillig_context.not_instruction(condition_register, bit_size, result_register); + self.brillig_context.not_instruction(condition_register, result_register); } Instruction::Call { func, arguments } => match &dfg[*func] { Value::ForeignFunction(func_name) => { let result_ids = dfg.instruction_results(instruction_id); let input_registers = vecmap(arguments, |value_id| { - self.convert_ssa_value(*value_id, dfg).to_register_or_memory() + self.convert_ssa_value(*value_id, dfg).to_value_or_array() + }); + let input_value_types = vecmap(arguments, |value_id| { + let value_type = dfg.type_of_value(*value_id); + type_to_heap_value_type(&value_type) }); let output_registers = vecmap(result_ids, |value_id| { - self.allocate_external_call_result(*value_id, dfg).to_register_or_memory() + self.allocate_external_call_result(*value_id, dfg).to_value_or_array() + }); + let output_value_types = vecmap(result_ids, |value_id| { + let value_type = dfg.type_of_value(*value_id); + type_to_heap_value_type(&value_type) }); self.brillig_context.foreign_call_instruction( func_name.to_owned(), &input_registers, + &input_value_types, &output_registers, + &output_value_types, ); for (i, output_register) in output_registers.iter().enumerate() { - if let RegisterOrMemory::HeapVector(HeapVector { size, .. }) = - output_register - { + if let ValueOrArray::HeapVector(HeapVector { size, .. 
}) = output_register { // Update the stack pointer so that we do not overwrite // dynamic memory returned from other external calls self.brillig_context.update_stack_pointer(*size); // Update the dynamic slice length maintained in SSA - if let RegisterOrMemory::RegisterIndex(len_index) = - output_registers[i - 1] + if let ValueOrArray::MemoryAddress(len_index) = output_registers[i - 1] { let element_size = dfg[result_ids[i]].get_type().element_size(); self.brillig_context.mov_instruction(len_index, *size); @@ -369,7 +395,8 @@ impl<'block> BrilligBlock<'block> { } } Value::Function(func_id) => { - self.convert_ssa_function_call(*func_id, arguments, dfg, instruction_id); + let result_ids = dfg.instruction_results(instruction_id); + self.convert_ssa_function_call(*func_id, arguments, dfg, result_ids); } Value::Intrinsic(Intrinsic::BlackBox(bb_func)) => { // Slices are represented as a tuple of (length, slice contents). @@ -407,7 +434,7 @@ impl<'block> BrilligBlock<'block> { ); } Value::Intrinsic(Intrinsic::ArrayLen) => { - let result_register = self.variables.define_register_variable( + let result_variable = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, dfg.instruction_results(instruction_id)[0], @@ -419,10 +446,11 @@ impl<'block> BrilligBlock<'block> { // or an array in the case of an array. if let Type::Numeric(_) = dfg.type_of_value(param_id) { let len_variable = self.convert_ssa_value(arguments[0], dfg); - let len_register_index = len_variable.extract_register(); - self.brillig_context.mov_instruction(result_register, len_register_index); + let length = len_variable.extract_single_addr(); + self.brillig_context + .mov_instruction(result_variable.address, length.address); } else { - self.convert_ssa_array_len(arguments[0], result_register, dfg); + self.convert_ssa_array_len(arguments[0], result_variable.address, dfg); } } Value::Intrinsic( @@ -441,13 +469,13 @@ impl<'block> BrilligBlock<'block> { ); } Value::Intrinsic(Intrinsic::ToRadix(endianness)) => { - let source = self.convert_ssa_register_value(arguments[0], dfg); - let radix = self.convert_ssa_register_value(arguments[1], dfg); - let limb_count = self.convert_ssa_register_value(arguments[2], dfg); + let source = self.convert_ssa_single_addr_value(arguments[0], dfg); + let radix = self.convert_ssa_single_addr_value(arguments[1], dfg); + let limb_count = self.convert_ssa_single_addr_value(arguments[2], dfg); let results = dfg.instruction_results(instruction_id); - let target_len = self.variables.define_register_variable( + let target_len = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, results[0], @@ -465,19 +493,19 @@ impl<'block> BrilligBlock<'block> { .extract_vector(); // Update the user-facing slice length - self.brillig_context.mov_instruction(target_len, limb_count); + self.brillig_context.mov_instruction(target_len.address, limb_count.address); self.brillig_context.radix_instruction( - source, + source.address, target_vector, - radix, - limb_count, + radix.address, + limb_count.address, matches!(endianness, Endian::Big), ); } Value::Intrinsic(Intrinsic::ToBits(endianness)) => { - let source = self.convert_ssa_register_value(arguments[0], dfg); - let limb_count = self.convert_ssa_register_value(arguments[1], dfg); + let source = self.convert_ssa_single_addr_value(arguments[0], dfg); + let limb_count = self.convert_ssa_single_addr_value(arguments[1], dfg); let results = dfg.instruction_results(instruction_id); @@ -487,7 +515,7 @@ impl<'block> 
BrilligBlock<'block> { results[0], dfg, ); - let target_len = target_len_variable.extract_register(); + let target_len = target_len_variable.extract_single_addr(); let target_vector = match self.variables.define_variable( self.function_context, @@ -499,19 +527,21 @@ impl<'block> BrilligBlock<'block> { self.brillig_context.array_to_vector(&array) } BrilligVariable::BrilligVector(vector) => vector, - BrilligVariable::Simple(..) => unreachable!("ICE: ToBits on non-array"), + BrilligVariable::SingleAddr(..) => unreachable!("ICE: ToBits on non-array"), }; - let radix = self.brillig_context.make_constant(2_usize.into()); + let radix = self + .brillig_context + .make_constant(2_usize.into(), FieldElement::max_num_bits()); // Update the user-facing slice length - self.brillig_context.mov_instruction(target_len, limb_count); + self.brillig_context.mov_instruction(target_len.address, limb_count.address); self.brillig_context.radix_instruction( - source, + source.address, target_vector, radix, - limb_count, + limb_count.address, matches!(endianness, Endian::Big), ); @@ -523,13 +553,13 @@ impl<'block> BrilligBlock<'block> { }, Instruction::Truncate { value, bit_size, .. } => { let result_ids = dfg.instruction_results(instruction_id); - let destination_register = self.variables.define_register_variable( + let destination_register = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, result_ids[0], dfg, ); - let source_register = self.convert_ssa_register_value(*value, dfg); + let source_register = self.convert_ssa_single_addr_value(*value, dfg); self.brillig_context.truncate_instruction( destination_register, source_register, @@ -538,14 +568,14 @@ impl<'block> BrilligBlock<'block> { } Instruction::Cast(value, _) => { let result_ids = dfg.instruction_results(instruction_id); - let destination_register = self.variables.define_register_variable( + let destination_variable = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, result_ids[0], dfg, ); - let source_register = self.convert_ssa_register_value(*value, dfg); - self.convert_cast(destination_register, source_register); + let source_variable = self.convert_ssa_single_addr_value(*value, dfg); + self.convert_cast(destination_variable, source_variable); } Instruction::ArrayGet { array, index } => { let result_ids = dfg.instruction_results(instruction_id); @@ -563,17 +593,17 @@ impl<'block> BrilligBlock<'block> { _ => unreachable!("ICE: array get on non-array"), }; - let index_register = self.convert_ssa_register_value(*index, dfg); - self.validate_array_index(array_variable, index_register); + let index_variable = self.convert_ssa_single_addr_value(*index, dfg); + self.validate_array_index(array_variable, index_variable); self.retrieve_variable_from_array( array_pointer, - index_register, + index_variable.address, destination_variable, ); } Instruction::ArraySet { array, index, value, .. 
} => { let source_variable = self.convert_ssa_value(*array, dfg); - let index_register = self.convert_ssa_register_value(*index, dfg); + let index_register = self.convert_ssa_single_addr_value(*index, dfg); let value_variable = self.convert_ssa_value(*value, dfg); let result_ids = dfg.instruction_results(instruction_id); @@ -588,28 +618,46 @@ impl<'block> BrilligBlock<'block> { self.convert_ssa_array_set( source_variable, destination_variable, - index_register, + index_register.address, value_variable, ); } Instruction::RangeCheck { value, max_bit_size, assert_message } => { - let left = self.convert_ssa_register_value(*value, dfg); - let max = BigUint::from(2_u128).pow(*max_bit_size); - let right = self.brillig_context.allocate_register(); - self.brillig_context.const_instruction( - right, - FieldElement::from_be_bytes_reduce(&max.to_bytes_be()).into(), - ); + let value = self.convert_ssa_single_addr_value(*value, dfg); + // SSA generates redundant range checks. A range check with a max bit size >= value.bit_size will always pass. + if value.bit_size > *max_bit_size { + // Cast original value to field + let left = SingleAddrVariable { + address: self.brillig_context.allocate_register(), + bit_size: FieldElement::max_num_bits(), + }; + self.convert_cast(left, value); - let brillig_binary_op = BrilligBinaryOp::Integer { - op: BinaryIntOp::LessThan, - bit_size: max_bit_size + 1, - }; - let condition = self.brillig_context.allocate_register(); - self.brillig_context.binary_instruction(left, right, condition, brillig_binary_op); - self.brillig_context.constrain_instruction(condition, assert_message.clone()); - self.brillig_context.deallocate_register(condition); - self.brillig_context.deallocate_register(right); + // Create a field constant with the max + let max = BigUint::from(2_u128).pow(*max_bit_size) - BigUint::from(1_u128); + let right = self.brillig_context.make_constant( + FieldElement::from_be_bytes_reduce(&max.to_bytes_be()).into(), + FieldElement::max_num_bits(), + ); + + // Check if lte max + let brillig_binary_op = BrilligBinaryOp::Integer { + op: BinaryIntOp::LessThanEquals, + bit_size: FieldElement::max_num_bits(), + }; + let condition = self.brillig_context.allocate_register(); + self.brillig_context.binary_instruction( + left.address, + right, + condition, + brillig_binary_op, + ); + + self.brillig_context.constrain_instruction(condition, assert_message.clone()); + self.brillig_context.deallocate_register(condition); + self.brillig_context.deallocate_register(left.address); + self.brillig_context.deallocate_register(right); + } } Instruction::IncrementRc { value } => { let rc_register = match self.convert_ssa_value(*value, dfg) { @@ -628,7 +676,11 @@ impl<'block> BrilligBlock<'block> { .expect("Last uses for instruction should have been computed"); for dead_variable in dead_variables { - self.variables.remove_variable(dead_variable); + self.variables.remove_variable( + dead_variable, + self.function_context, + self.brillig_context, + ); } self.brillig_context.set_call_stack(CallStack::new()); } @@ -638,16 +690,14 @@ impl<'block> BrilligBlock<'block> { func_id: FunctionId, arguments: &[ValueId], dfg: &DataFlowGraph, - instruction_id: InstructionId, + result_ids: &[ValueId], ) { // Convert the arguments to registers casting those to the types of the receiving function - let argument_registers: Vec = arguments + let argument_registers: Vec = arguments .iter() .flat_map(|argument_id| self.convert_ssa_value(*argument_id, dfg).extract_registers()) .collect(); - let result_ids = 
dfg.instruction_results(instruction_id); - // Create label for the function that will be called let label_of_function_to_call = FunctionContext::function_id_to_function_label(func_id); @@ -677,7 +727,7 @@ impl<'block> BrilligBlock<'block> { }); // Collect the registers that should have been returned - let returned_registers: Vec = variables_assigned_to + let returned_registers: Vec = variables_assigned_to .iter() .flat_map(|returned_variable| returned_variable.extract_registers()) .collect(); @@ -695,11 +745,11 @@ impl<'block> BrilligBlock<'block> { fn validate_array_index( &mut self, array_variable: BrilligVariable, - index_register: RegisterIndex, + index_register: SingleAddrVariable, ) { let (size_as_register, should_deallocate_size) = match array_variable { BrilligVariable::BrilligArray(BrilligArray { size, .. }) => { - (self.brillig_context.make_constant(size.into()), true) + (self.brillig_context.make_usize_constant(size.into()), true) } BrilligVariable::BrilligVector(BrilligVector { size, .. }) => (size, false), _ => unreachable!("ICE: validate array index on non-array"), @@ -708,7 +758,7 @@ impl<'block> BrilligBlock<'block> { let condition = self.brillig_context.allocate_register(); self.brillig_context.memory_op( - index_register, + index_register.address, size_as_register, condition, BinaryIntOp::LessThan, @@ -725,13 +775,17 @@ impl<'block> BrilligBlock<'block> { pub(crate) fn retrieve_variable_from_array( &mut self, - array_pointer: RegisterIndex, - index_register: RegisterIndex, + array_pointer: MemoryAddress, + index_register: MemoryAddress, destination_variable: BrilligVariable, ) { match destination_variable { - BrilligVariable::Simple(destination_register) => { - self.brillig_context.array_get(array_pointer, index_register, destination_register); + BrilligVariable::SingleAddr(destination_register) => { + self.brillig_context.array_get( + array_pointer, + index_register, + destination_register.address, + ); } BrilligVariable::BrilligArray(..) | BrilligVariable::BrilligVector(..) => { let reference = self.brillig_context.allocate_register(); @@ -748,7 +802,7 @@ impl<'block> BrilligBlock<'block> { &mut self, source_variable: BrilligVariable, destination_variable: BrilligVariable, - index_register: RegisterIndex, + index_register: MemoryAddress, value_variable: BrilligVariable, ) { let destination_pointer = match destination_variable { @@ -766,7 +820,7 @@ impl<'block> BrilligBlock<'block> { let (source_pointer, source_size_as_register) = match source_variable { BrilligVariable::BrilligArray(BrilligArray { size, pointer, rc: _ }) => { let source_size_register = self.brillig_context.allocate_register(); - self.brillig_context.const_instruction(source_size_register, size.into()); + self.brillig_context.usize_const(source_size_register, size.into()); (pointer, source_size_register) } BrilligVariable::BrilligVector(BrilligVector { size, pointer, rc: _ }) => { @@ -777,7 +831,7 @@ impl<'block> BrilligBlock<'block> { _ => unreachable!("ICE: array set on non-array"), }; - let one = self.brillig_context.make_constant(1_usize.into()); + let one = self.brillig_context.make_usize_constant(1_usize.into()); let condition = self.brillig_context.allocate_register(); self.brillig_context.binary_instruction( @@ -805,7 +859,7 @@ impl<'block> BrilligBlock<'block> { match destination_variable { BrilligVariable::BrilligArray(BrilligArray { rc: target_rc, .. 
}) => { - self.brillig_context.const_instruction(target_rc, 1_usize.into()); + self.brillig_context.usize_const(target_rc, 1_usize.into()); } BrilligVariable::BrilligVector(BrilligVector { size: target_size, @@ -813,7 +867,7 @@ impl<'block> BrilligBlock<'block> { .. }) => { self.brillig_context.mov_instruction(target_size, source_size_as_register); - self.brillig_context.const_instruction(target_rc, 1_usize.into()); + self.brillig_context.usize_const(target_rc, 1_usize.into()); } _ => unreachable!("ICE: array set on non-array"), } @@ -828,16 +882,16 @@ impl<'block> BrilligBlock<'block> { pub(crate) fn store_variable_in_array_with_ctx( ctx: &mut BrilligContext, - destination_pointer: RegisterIndex, - index_register: RegisterIndex, + destination_pointer: MemoryAddress, + index_register: MemoryAddress, value_variable: BrilligVariable, ) { match value_variable { - BrilligVariable::Simple(value_register) => { - ctx.array_set(destination_pointer, index_register, value_register); + BrilligVariable::SingleAddr(value_variable) => { + ctx.array_set(destination_pointer, index_register, value_variable.address); } BrilligVariable::BrilligArray(_) => { - let reference: RegisterIndex = ctx.allocate_register(); + let reference: MemoryAddress = ctx.allocate_register(); ctx.allocate_array_reference_instruction(reference); ctx.store_variable_instruction(reference, value_variable); ctx.array_set(destination_pointer, index_register, reference); @@ -855,8 +909,8 @@ impl<'block> BrilligBlock<'block> { pub(crate) fn store_variable_in_array( &mut self, - destination_pointer: RegisterIndex, - index_register: RegisterIndex, + destination_pointer: MemoryAddress, + index_register: MemoryAddress, value_variable: BrilligVariable, ) { Self::store_variable_in_array_with_ctx( @@ -889,7 +943,7 @@ impl<'block> BrilligBlock<'block> { results[0], dfg, ) { - BrilligVariable::Simple(register_index) => register_index, + BrilligVariable::SingleAddr(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; @@ -905,7 +959,7 @@ impl<'block> BrilligBlock<'block> { self.convert_ssa_value(*arg, dfg) }); - self.update_slice_length(target_len, arguments[0], dfg, BinaryIntOp::Add); + self.update_slice_length(target_len.address, arguments[0], dfg, BinaryIntOp::Add); self.slice_push_back_operation(target_vector, source_vector, &item_values); } @@ -916,7 +970,7 @@ impl<'block> BrilligBlock<'block> { results[0], dfg, ) { - BrilligVariable::Simple(register_index) => register_index, + BrilligVariable::SingleAddr(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; @@ -931,7 +985,7 @@ impl<'block> BrilligBlock<'block> { self.convert_ssa_value(*arg, dfg) }); - self.update_slice_length(target_len, arguments[0], dfg, BinaryIntOp::Add); + self.update_slice_length(target_len.address, arguments[0], dfg, BinaryIntOp::Add); self.slice_push_front_operation(target_vector, source_vector, &item_values); } @@ -942,7 +996,7 @@ impl<'block> BrilligBlock<'block> { results[0], dfg, ) { - BrilligVariable::Simple(register_index) => register_index, + BrilligVariable::SingleAddr(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; @@ -964,7 +1018,7 @@ impl<'block> BrilligBlock<'block> { ) }); - self.update_slice_length(target_len, arguments[0], dfg, BinaryIntOp::Sub); + self.update_slice_length(target_len.address, arguments[0], dfg, BinaryIntOp::Sub); self.slice_pop_back_operation(target_vector, 
source_vector, &pop_variables); } @@ -975,7 +1029,7 @@ impl<'block> BrilligBlock<'block> { results[element_size], dfg, ) { - BrilligVariable::Simple(register_index) => register_index, + BrilligVariable::SingleAddr(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; @@ -996,7 +1050,7 @@ impl<'block> BrilligBlock<'block> { ); let target_vector = target_variable.extract_vector(); - self.update_slice_length(target_len, arguments[0], dfg, BinaryIntOp::Sub); + self.update_slice_length(target_len.address, arguments[0], dfg, BinaryIntOp::Sub); self.slice_pop_front_operation(target_vector, source_vector, &pop_variables); } @@ -1007,7 +1061,7 @@ impl<'block> BrilligBlock<'block> { results[0], dfg, ) { - BrilligVariable::Simple(register_index) => register_index, + BrilligVariable::SingleAddr(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; @@ -1023,13 +1077,13 @@ impl<'block> BrilligBlock<'block> { // Remove if indexing in insert is changed to flattened indexing // https://github.com/noir-lang/noir/issues/1889#issuecomment-1668048587 - let user_index = self.convert_ssa_register_value(arguments[2], dfg); + let user_index = self.convert_ssa_single_addr_value(arguments[2], dfg); - let converted_index = self.brillig_context.make_constant(element_size.into()); + let converted_index = self.brillig_context.make_usize_constant(element_size.into()); self.brillig_context.memory_op( converted_index, - user_index, + user_index.address, converted_index, BinaryIntOp::Mul, ); @@ -1038,7 +1092,7 @@ impl<'block> BrilligBlock<'block> { self.convert_ssa_value(*arg, dfg) }); - self.update_slice_length(target_len, arguments[0], dfg, BinaryIntOp::Add); + self.update_slice_length(target_len.address, arguments[0], dfg, BinaryIntOp::Add); self.slice_insert_operation(target_vector, source_vector, converted_index, &items); self.brillig_context.deallocate_register(converted_index); @@ -1050,7 +1104,7 @@ impl<'block> BrilligBlock<'block> { results[0], dfg, ) { - BrilligVariable::Simple(register_index) => register_index, + BrilligVariable::SingleAddr(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; @@ -1066,12 +1120,12 @@ impl<'block> BrilligBlock<'block> { // Remove if indexing in remove is changed to flattened indexing // https://github.com/noir-lang/noir/issues/1889#issuecomment-1668048587 - let user_index = self.convert_ssa_register_value(arguments[2], dfg); + let user_index = self.convert_ssa_single_addr_value(arguments[2], dfg); - let converted_index = self.brillig_context.make_constant(element_size.into()); + let converted_index = self.brillig_context.make_usize_constant(element_size.into()); self.brillig_context.memory_op( converted_index, - user_index, + user_index.address, converted_index, BinaryIntOp::Mul, ); @@ -1085,7 +1139,7 @@ impl<'block> BrilligBlock<'block> { ) }); - self.update_slice_length(target_len, arguments[0], dfg, BinaryIntOp::Sub); + self.update_slice_length(target_len.address, arguments[0], dfg, BinaryIntOp::Sub); self.slice_remove_operation( target_vector, @@ -1111,24 +1165,24 @@ impl<'block> BrilligBlock<'block> { /// of fields in the vector. 
fn update_slice_length( &mut self, - target_len: RegisterIndex, + target_len: MemoryAddress, source_value: ValueId, dfg: &DataFlowGraph, binary_op: BinaryIntOp, ) { let source_len_variable = self.convert_ssa_value(source_value, dfg); - let source_len = source_len_variable.extract_register(); + let source_len = source_len_variable.extract_single_addr(); - self.brillig_context.usize_op(source_len, target_len, binary_op, 1); + self.brillig_context.usize_op(source_len.address, target_len, binary_op, 1); } /// Converts an SSA cast to a sequence of Brillig opcodes. /// Casting is only necessary when shrinking the bit size of a numeric value. - fn convert_cast(&mut self, destination: RegisterIndex, source: RegisterIndex) { + fn convert_cast(&mut self, destination: SingleAddrVariable, source: SingleAddrVariable) { // We assume that `source` is a valid `target_type` as it's expected that a truncate instruction was emitted // to ensure this is the case. - self.brillig_context.mov_instruction(destination, source); + self.brillig_context.cast_instruction(destination, source); } /// Converts the Binary instruction into a sequence of Brillig opcodes. @@ -1136,18 +1190,110 @@ impl<'block> BrilligBlock<'block> { &mut self, binary: &Binary, dfg: &DataFlowGraph, - result_register: RegisterIndex, + result_variable: SingleAddrVariable, ) { let binary_type = type_of_binary_operation(dfg[binary.lhs].get_type(), dfg[binary.rhs].get_type()); - let left = self.convert_ssa_register_value(binary.lhs, dfg); - let right = self.convert_ssa_register_value(binary.rhs, dfg); + let left = self.convert_ssa_single_addr_value(binary.lhs, dfg); + let right = self.convert_ssa_single_addr_value(binary.rhs, dfg); - let brillig_binary_op = + let (brillig_binary_op, is_signed) = convert_ssa_binary_op_to_brillig_binary_op(binary.operator, &binary_type); - self.brillig_context.binary_instruction(left, right, result_register, brillig_binary_op); + self.brillig_context.binary_instruction( + left.address, + right.address, + result_variable.address, + brillig_binary_op, + ); + + self.add_overflow_check(brillig_binary_op, left, right, result_variable, is_signed); + } + + fn add_overflow_check( + &mut self, + binary_operation: BrilligBinaryOp, + left: SingleAddrVariable, + right: SingleAddrVariable, + result: SingleAddrVariable, + is_signed: bool, + ) { + let (op, bit_size) = if let BrilligBinaryOp::Integer { op, bit_size } = binary_operation { + (op, bit_size) + } else { + return; + }; + + match (op, is_signed) { + (BinaryIntOp::Add, false) => { + let condition = self.brillig_context.allocate_register(); + // Check that lhs <= result + self.brillig_context.binary_instruction( + left.address, + result.address, + condition, + BrilligBinaryOp::Integer { op: BinaryIntOp::LessThanEquals, bit_size }, + ); + self.brillig_context.constrain_instruction( + condition, + Some("attempt to add with overflow".to_string()), + ); + self.brillig_context.deallocate_register(condition); + } + (BinaryIntOp::Sub, false) => { + let condition = self.brillig_context.allocate_register(); + // Check that rhs <= lhs + self.brillig_context.binary_instruction( + right.address, + left.address, + condition, + BrilligBinaryOp::Integer { op: BinaryIntOp::LessThanEquals, bit_size }, + ); + self.brillig_context.constrain_instruction( + condition, + Some("attempt to subtract with overflow".to_string()), + ); + self.brillig_context.deallocate_register(condition); + } + (BinaryIntOp::Mul, false) => { + // Multiplication overflow is only possible for bit sizes > 1 + if bit_size 
> 1 { + let is_right_zero = self.brillig_context.allocate_register(); + let zero = self.brillig_context.make_constant(0_usize.into(), bit_size); + self.brillig_context.binary_instruction( + zero, + right.address, + is_right_zero, + BrilligBinaryOp::Integer { op: BinaryIntOp::Equals, bit_size }, + ); + self.brillig_context.if_not_instruction(is_right_zero, |ctx| { + let condition = ctx.allocate_register(); + // Check that result / rhs == lhs + ctx.binary_instruction( + result.address, + right.address, + condition, + BrilligBinaryOp::Integer { op: BinaryIntOp::UnsignedDiv, bit_size }, + ); + ctx.binary_instruction( + condition, + left.address, + condition, + BrilligBinaryOp::Integer { op: BinaryIntOp::Equals, bit_size }, + ); + ctx.constrain_instruction( + condition, + Some("attempt to multiply with overflow".to_string()), + ); + ctx.deallocate_register(condition); + }); + self.brillig_context.deallocate_register(is_right_zero); + self.brillig_context.deallocate_register(zero); + } + } + _ => {} + } } /// Converts an SSA `ValueId` into a `RegisterOrMemory`. Initializes if necessary. @@ -1161,7 +1307,7 @@ impl<'block> BrilligBlock<'block> { // converted to registers so we fetch from the cache. self.variables.get_allocation(self.function_context, value_id, dfg) } - Value::NumericConstant { constant, .. } => { + Value::NumericConstant { constant, typ } => { // Constants might have been converted previously or not, so we get or create and // (re)initialize the value inside. if let Some(variable) = self.variables.get_constant(value_id, dfg) { @@ -1169,9 +1315,13 @@ impl<'block> BrilligBlock<'block> { } else { let new_variable = self.variables.allocate_constant(self.brillig_context, value_id, dfg); - let register_index = new_variable.extract_register(); + let register_index = new_variable.extract_single_addr(); - self.brillig_context.const_instruction(register_index, (*constant).into()); + self.brillig_context.const_instruction( + register_index.address, + (*constant).into(), + get_bit_size_from_ssa_type(typ), + ); new_variable } } @@ -1187,16 +1337,15 @@ impl<'block> BrilligBlock<'block> { BrilligVariable::BrilligArray(brillig_array) => { self.brillig_context .allocate_fixed_length_array(brillig_array.pointer, array.len()); - self.brillig_context - .const_instruction(brillig_array.rc, 1_usize.into()); + self.brillig_context.usize_const(brillig_array.rc, 1_usize.into()); brillig_array.pointer } BrilligVariable::BrilligVector(vector) => { - self.brillig_context.const_instruction(vector.size, array.len().into()); + self.brillig_context.usize_const(vector.size, array.len().into()); self.brillig_context .allocate_array_instruction(vector.pointer, vector.size); - self.brillig_context.const_instruction(vector.rc, 1_usize.into()); + self.brillig_context.usize_const(vector.rc, 1_usize.into()); vector.pointer } @@ -1208,7 +1357,8 @@ impl<'block> BrilligBlock<'block> { // Write the items // Allocate a register for the iterator - let iterator_register = self.brillig_context.make_constant(0_usize.into()); + let iterator_register = + self.brillig_context.make_usize_constant(0_usize.into()); for element_id in array.iter() { let element_variable = self.convert_ssa_value(*element_id, dfg); @@ -1227,20 +1377,36 @@ impl<'block> BrilligBlock<'block> { new_variable } } - Value::Function(_) | Value::Intrinsic(_) | Value::ForeignFunction(_) => { + Value::Function(_) => { + // For the debugger instrumentation we want to allow passing + // around values representing function pointers, even though + // there is no 
interaction with the function possible given that + // value. + let new_variable = + self.variables.allocate_constant(self.brillig_context, value_id, dfg); + let register_index = new_variable.extract_single_addr(); + + self.brillig_context.const_instruction( + register_index.address, + value_id.to_usize().into(), + 32, + ); + new_variable + } + Value::Intrinsic(_) | Value::ForeignFunction(_) => { todo!("ICE: Cannot convert value {value:?}") } } } - /// Converts an SSA `ValueId` into a `RegisterIndex`. Initializes if necessary. - fn convert_ssa_register_value( + /// Converts an SSA `ValueId` into a `MemoryAddress`. Initializes if necessary. + fn convert_ssa_single_addr_value( &mut self, value_id: ValueId, dfg: &DataFlowGraph, - ) -> RegisterIndex { + ) -> SingleAddrVariable { let variable = self.convert_ssa_value(value_id, dfg); - variable.extract_register() + variable.extract_single_addr() } fn allocate_external_call_result( @@ -1266,7 +1432,7 @@ impl<'block> BrilligBlock<'block> { ); let array = variable.extract_array(); self.brillig_context.allocate_fixed_length_array(array.pointer, array.size); - self.brillig_context.const_instruction(array.rc, 1_usize.into()); + self.brillig_context.usize_const(array.rc, 1_usize.into()); variable } @@ -1283,7 +1449,7 @@ impl<'block> BrilligBlock<'block> { // The stack pointer will then be updated by the caller of this method // once the external call is resolved and the array size is known self.brillig_context.set_array_pointer(vector.pointer); - self.brillig_context.const_instruction(vector.rc, 1_usize.into()); + self.brillig_context.usize_const(vector.rc, 1_usize.into()); variable } @@ -1299,7 +1465,7 @@ impl<'block> BrilligBlock<'block> { fn convert_ssa_array_len( &mut self, array_id: ValueId, - result_register: RegisterIndex, + result_register: MemoryAddress, dfg: &DataFlowGraph, ) { let array_variable = self.convert_ssa_value(array_id, dfg); @@ -1307,8 +1473,7 @@ impl<'block> BrilligBlock<'block> { match array_variable { BrilligVariable::BrilligArray(BrilligArray { size, .. }) => { - self.brillig_context - .const_instruction(result_register, (size / element_size).into()); + self.brillig_context.usize_const(result_register, (size / element_size).into()); } BrilligVariable::BrilligVector(BrilligVector { size, .. }) => { self.brillig_context.usize_op( @@ -1326,8 +1491,6 @@ impl<'block> BrilligBlock<'block> { } /// Returns the type of the operation considering the types of the operands -/// TODO: SSA issues binary operations between fields and integers. -/// This probably should be explicitly casted in SSA to avoid having to coerce at this level. pub(crate) fn type_of_binary_operation(lhs_type: &Type, rhs_type: &Type) -> Type { match (lhs_type, rhs_type) { (_, Type::Function) | (Type::Function, _) => { @@ -1342,10 +1505,6 @@ pub(crate) fn type_of_binary_operation(lhs_type: &Type, rhs_type: &Type) -> Type (_, Type::Slice(..)) | (Type::Slice(..), _) => { unreachable!("Arrays are invalid in binary operations") } - // If either side is a Field constant then, we coerce into the type - // of the other operand - (Type::Numeric(NumericType::NativeField), typ) - | (typ, Type::Numeric(NumericType::NativeField)) => typ.clone(), // If both sides are numeric type, then we expect their types to be // the same. 
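The unsigned overflow checks emitted by `add_overflow_check` above reduce to three predicates: addition is sound when `lhs <= result`, subtraction when `rhs <= lhs`, and multiplication (for bit sizes greater than 1) when `rhs == 0` or `result / rhs == lhs`. A minimal, self-contained sketch of the same predicates over `u8`, with wrapping arithmetic standing in for the bit-size-limited Brillig result (the function name and the choice of `u8` are illustrative only):

    fn would_overflow_u8(op: char, lhs: u8, rhs: u8) -> bool {
        match op {
            // Addition: the emitted constraint is `lhs <= result`, so overflow
            // occurred exactly when the wrapped result is smaller than `lhs`.
            '+' => lhs.wrapping_add(rhs) < lhs,
            // Subtraction: the emitted constraint is `rhs <= lhs`, so underflow
            // occurred exactly when `rhs > lhs`.
            '-' => rhs > lhs,
            // Multiplication: when `rhs != 0` the emitted constraint is
            // `result / rhs == lhs`, so overflow occurred exactly when it differs.
            '*' => rhs != 0 && lhs.wrapping_mul(rhs) / rhs != lhs,
            _ => false,
        }
    }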
(Type::Numeric(lhs_type), Type::Numeric(rhs_type)) => { @@ -1364,7 +1523,7 @@ pub(crate) fn type_of_binary_operation(lhs_type: &Type, rhs_type: &Type) -> Type pub(crate) fn convert_ssa_binary_op_to_brillig_binary_op( ssa_op: BinaryOp, typ: &Type, -) -> BrilligBinaryOp { +) -> (BrilligBinaryOp, bool) { // First get the bit size and whether its a signed integer, if it is a numeric type // if it is not,then we return None, indicating that // it is a Field. @@ -1384,10 +1543,6 @@ pub(crate) fn convert_ssa_binary_op_to_brillig_binary_op( BinaryOp::Mul => BrilligBinaryOp::Field { op: BinaryFieldOp::Mul }, BinaryOp::Div => BrilligBinaryOp::Field { op: BinaryFieldOp::Div }, BinaryOp::Eq => BrilligBinaryOp::Field { op: BinaryFieldOp::Equals }, - BinaryOp::Lt => BrilligBinaryOp::Integer { - op: BinaryIntOp::LessThan, - bit_size: BRILLIG_INTEGER_ARITHMETIC_BIT_SIZE, - }, _ => unreachable!( "Field type cannot be used with {op}. This should have been caught by the frontend" ), @@ -1414,6 +1569,8 @@ pub(crate) fn convert_ssa_binary_op_to_brillig_binary_op( BinaryOp::And => BinaryIntOp::And, BinaryOp::Or => BinaryIntOp::Or, BinaryOp::Xor => BinaryIntOp::Xor, + BinaryOp::Shl => BinaryIntOp::Shl, + BinaryOp::Shr => BinaryIntOp::Shr, }; BrilligBinaryOp::Integer { op: operation, bit_size } @@ -1421,7 +1578,9 @@ pub(crate) fn convert_ssa_binary_op_to_brillig_binary_op( // If bit size is available then it is a binary integer operation match bit_size_signedness { - Some((bit_size, is_signed)) => binary_op_to_int_op(ssa_op, *bit_size, is_signed), - None => binary_op_to_field_op(ssa_op), + Some((bit_size, is_signed)) => { + (binary_op_to_int_op(ssa_op, *bit_size, is_signed), is_signed) + } + None => (binary_op_to_field_op(ssa_op), false), } } diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs index f2e698c0aa9..f463bd4de4d 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs @@ -1,10 +1,9 @@ -use acvm::brillig_vm::brillig::RegisterIndex; use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet}; use crate::{ brillig::brillig_ir::{ - brillig_variable::{BrilligArray, BrilligVariable, BrilligVector}, - BrilligContext, + brillig_variable::{BrilligArray, BrilligVariable, BrilligVector, SingleAddrVariable}, + BrilligContext, BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, }, ssa::ir::{ basic_block::BasicBlockId, @@ -19,6 +18,7 @@ use super::brillig_fn::FunctionContext; #[derive(Debug, Default)] pub(crate) struct BlockVariables { available_variables: HashSet, + block_parameters: HashSet, available_constants: HashMap, } @@ -26,7 +26,8 @@ impl BlockVariables { /// Creates a BlockVariables instance. It uses the variables that are live in to the block and the global available variables (block parameters) pub(crate) fn new(live_in: HashSet, all_block_parameters: HashSet) -> Self { BlockVariables { - available_variables: live_in.into_iter().chain(all_block_parameters).collect(), + available_variables: live_in.into_iter().chain(all_block_parameters.clone()).collect(), + block_parameters: all_block_parameters, ..Default::default() } } @@ -69,20 +70,35 @@ impl BlockVariables { } /// Defines a variable that fits in a single register and returns the allocated register. 
- pub(crate) fn define_register_variable( + pub(crate) fn define_single_addr_variable( &mut self, function_context: &mut FunctionContext, brillig_context: &mut BrilligContext, value: ValueId, dfg: &DataFlowGraph, - ) -> RegisterIndex { + ) -> SingleAddrVariable { let variable = self.define_variable(function_context, brillig_context, value, dfg); - variable.extract_register() + variable.extract_single_addr() } /// Removes a variable so it's not used anymore within this block. - pub(crate) fn remove_variable(&mut self, value_id: &ValueId) { - self.available_variables.remove(value_id); + pub(crate) fn remove_variable( + &mut self, + value_id: &ValueId, + function_context: &mut FunctionContext, + brillig_context: &mut BrilligContext, + ) { + assert!(self.available_variables.remove(value_id), "ICE: Variable is not available"); + // Block parameters should not be deallocated + if !self.block_parameters.contains(value_id) { + let variable = function_context + .ssa_value_allocations + .get(value_id) + .expect("ICE: Variable allocation not found"); + variable.extract_registers().iter().for_each(|register| { + brillig_context.deallocate_register(*register); + }); + } } /// For a given SSA value id, return the corresponding cached allocation. @@ -173,9 +189,22 @@ pub(crate) fn allocate_value( let typ = dfg.type_of_value(value_id); match typ { - Type::Numeric(_) | Type::Reference(_) => { - let register = brillig_context.allocate_register(); - BrilligVariable::Simple(register) + Type::Numeric(numeric_type) => BrilligVariable::SingleAddr(SingleAddrVariable { + address: brillig_context.allocate_register(), + bit_size: numeric_type.bit_size(), + }), + Type::Reference(_) => BrilligVariable::SingleAddr(SingleAddrVariable { + address: brillig_context.allocate_register(), + bit_size: BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, + }), + Type::Function => { + // NB. function references are converted to a constant when + // translating from SSA to Brillig (to allow for debugger + // instrumentation to work properly) + BrilligVariable::SingleAddr(SingleAddrVariable { + address: brillig_context.allocate_register(), + bit_size: 32, + }) } Type::Array(item_typ, elem_count) => { let pointer_register = brillig_context.allocate_register(); @@ -199,8 +228,5 @@ pub(crate) fn allocate_value( rc: rc_register, }) } - Type::Function => { - unreachable!("ICE: Function values should have been removed from the SSA") - } } } diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs index a07865073ff..93c4b1a5042 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs @@ -1,5 +1,6 @@ -use acvm::acir::brillig::{ - BinaryFieldOp, BinaryIntOp, Opcode as BrilligOpcode, RegisterIndex, Value, +use acvm::{ + acir::brillig::{BinaryFieldOp, BinaryIntOp, MemoryAddress, Opcode as BrilligOpcode, Value}, + FieldElement, }; use crate::brillig::brillig_ir::artifact::GeneratedBrillig; @@ -13,17 +14,22 @@ pub(crate) fn directive_invert() -> GeneratedBrillig { // The input argument, ie the value that will be inverted. // We store the result in this register too. 
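For intuition, the invert directive defined here computes 0 for a zero input and the field inverse 1/x otherwise. A stand-alone model over a small prime field (the modulus `P`, `pow_mod`, and `invert` are illustrative stand-ins, not compiler code):

    const P: u64 = 65_537; // small prime standing in for the field modulus

    fn pow_mod(mut base: u64, mut exp: u64) -> u64 {
        let mut acc: u64 = 1;
        base %= P;
        while exp > 0 {
            if exp & 1 == 1 {
                acc = acc * base % P;
            }
            base = base * base % P;
            exp >>= 1;
        }
        acc
    }

    // Mirrors the directive: 0 maps to 0, anything else to its multiplicative inverse.
    fn invert(x: u64) -> u64 {
        if x % P == 0 {
            0
        } else {
            pow_mod(x, P - 2) // Fermat's little theorem: x^(P-2) == x^-1 (mod P)
        }
    }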
- let input = RegisterIndex::from(0); - let one_const = RegisterIndex::from(1); + let input = MemoryAddress::from(0); + let one_const = MemoryAddress::from(1); // Location of the stop opcode let stop_location = 3; GeneratedBrillig { byte_code: vec![ + BrilligOpcode::CalldataCopy { destination_address: input, size: 1, offset: 0 }, // If the input is zero, then we jump to the stop opcode BrilligOpcode::JumpIfNot { condition: input, location: stop_location }, // Put value one in register (1) - BrilligOpcode::Const { destination: one_const, value: Value::from(1_usize) }, + BrilligOpcode::Const { + destination: one_const, + value: Value::from(1_usize), + bit_size: FieldElement::max_num_bits(), + }, // Divide 1 by the input, and set the result of the division into register (0) BrilligOpcode::BinaryFieldOp { op: BinaryFieldOp::Div, @@ -31,7 +37,7 @@ pub(crate) fn directive_invert() -> GeneratedBrillig { rhs: input, destination: input, }, - BrilligOpcode::Stop, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 1 }, ], assert_messages: Default::default(), locations: Default::default(), @@ -52,36 +58,41 @@ pub(crate) fn directive_quotient(bit_size: u32) -> GeneratedBrillig { // `b` is (1) GeneratedBrillig { byte_code: vec![ + BrilligOpcode::CalldataCopy { + destination_address: MemoryAddress::from(0), + size: 2, + offset: 0, + }, //q = a/b is set into register (2) BrilligOpcode::BinaryIntOp { op: BinaryIntOp::UnsignedDiv, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(2), + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(2), bit_size, }, //(1)= q*b BrilligOpcode::BinaryIntOp { op: BinaryIntOp::Mul, - lhs: RegisterIndex::from(2), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(1), + lhs: MemoryAddress::from(2), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(1), bit_size, }, //(1) = a-q*b BrilligOpcode::BinaryIntOp { op: BinaryIntOp::Sub, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(1), + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(1), bit_size, }, //(0) = q BrilligOpcode::Mov { - destination: RegisterIndex::from(0), - source: RegisterIndex::from(2), + destination: MemoryAddress::from(0), + source: MemoryAddress::from(2), }, - BrilligOpcode::Stop, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 2 }, ], assert_messages: Default::default(), locations: Default::default(), diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs index 026def4ef11..b5da8296ba5 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs @@ -1,16 +1,17 @@ +use acvm::FieldElement; use iter_extended::vecmap; use crate::{ brillig::brillig_ir::{ artifact::{BrilligParameter, Label}, brillig_variable::BrilligVariable, - BrilligContext, + BrilligContext, BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, }, ssa::ir::{ basic_block::BasicBlockId, function::{Function, FunctionId}, post_order::PostOrder, - types::Type, + types::{NumericType, Type}, value::ValueId, }, }; @@ -72,7 +73,9 @@ impl FunctionContext { fn ssa_type_to_parameter(typ: &Type) -> BrilligParameter { match typ { - Type::Numeric(_) | Type::Reference(_) => BrilligParameter::Simple, + Type::Numeric(_) | Type::Reference(_) => { + 
BrilligParameter::SingleAddr(get_bit_size_from_ssa_type(typ)) + } Type::Array(item_type, size) => BrilligParameter::Array( vecmap(item_type.iter(), |item_typ| { FunctionContext::ssa_type_to_parameter(item_typ) @@ -110,3 +113,14 @@ impl FunctionContext { .collect() } } + +pub(crate) fn get_bit_size_from_ssa_type(typ: &Type) -> u32 { + match typ { + Type::Numeric(num_type) => match num_type { + NumericType::Signed { bit_size } | NumericType::Unsigned { bit_size } => *bit_size, + NumericType::NativeField => FieldElement::max_num_bits(), + }, + Type::Reference(_) => BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, + _ => unreachable!("ICE bitwise not on a non numeric type"), + } +} diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs index 6402e6f9d97..3fc0e981165 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs @@ -1,4 +1,4 @@ -use acvm::brillig_vm::brillig::{BinaryIntOp, RegisterIndex}; +use acvm::brillig_vm::brillig::{BinaryIntOp, MemoryAddress}; use crate::brillig::brillig_ir::brillig_variable::{BrilligVariable, BrilligVector}; @@ -20,7 +20,7 @@ impl<'block> BrilligBlock<'block> { ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); // We initialize the RC of the target vector to 1 - self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); + self.brillig_context.usize_const(target_vector.rc, 1_usize.into()); // Now we copy the source vector into the target vector self.brillig_context.copy_array_instruction( @@ -30,7 +30,7 @@ impl<'block> BrilligBlock<'block> { ); for (index, variable) in variables_to_insert.iter().enumerate() { - let target_index = self.brillig_context.make_constant(index.into()); + let target_index = self.brillig_context.make_usize_constant(index.into()); self.brillig_context.memory_op( target_index, source_vector.size, @@ -57,7 +57,7 @@ impl<'block> BrilligBlock<'block> { ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); // We initialize the RC of the target vector to 1 - self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); + self.brillig_context.usize_const(target_vector.rc, 1_usize.into()); // Now we offset the target pointer by variables_to_insert.len() let destination_copy_pointer = self.brillig_context.allocate_register(); @@ -77,7 +77,7 @@ impl<'block> BrilligBlock<'block> { // Then we write the items to insert at the start for (index, variable) in variables_to_insert.iter().enumerate() { - let target_index = self.brillig_context.make_constant(index.into()); + let target_index = self.brillig_context.make_usize_constant(index.into()); self.store_variable_in_array(target_vector.pointer, target_index, *variable); self.brillig_context.deallocate_register(target_index); } @@ -100,7 +100,7 @@ impl<'block> BrilligBlock<'block> { ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); // We initialize the RC of the target vector to 1 - self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); + self.brillig_context.usize_const(target_vector.rc, 1_usize.into()); // Now we offset the source pointer by removed_items.len() let source_copy_pointer = self.brillig_context.allocate_register(); @@ -119,7 +119,7 @@ impl<'block> BrilligBlock<'block> { ); for (index, variable) in removed_items.iter().enumerate() 
{ - let target_index = self.brillig_context.make_constant(index.into()); + let target_index = self.brillig_context.make_usize_constant(index.into()); self.retrieve_variable_from_array(source_vector.pointer, target_index, *variable); self.brillig_context.deallocate_register(target_index); } @@ -142,7 +142,7 @@ impl<'block> BrilligBlock<'block> { ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); // We initialize the RC of the target vector to 1 - self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); + self.brillig_context.usize_const(target_vector.rc, 1_usize.into()); // Now we copy all elements except the last items into the target vector self.brillig_context.copy_array_instruction( @@ -152,7 +152,7 @@ impl<'block> BrilligBlock<'block> { ); for (index, variable) in removed_items.iter().enumerate() { - let target_index = self.brillig_context.make_constant(index.into()); + let target_index = self.brillig_context.make_usize_constant(index.into()); self.brillig_context.memory_op( target_index, target_vector.size, @@ -168,7 +168,7 @@ impl<'block> BrilligBlock<'block> { &mut self, target_vector: BrilligVector, source_vector: BrilligVector, - index: RegisterIndex, + index: MemoryAddress, items: &[BrilligVariable], ) { // First we need to allocate the target vector incrementing the size by items.len() @@ -180,7 +180,7 @@ impl<'block> BrilligBlock<'block> { ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); // We initialize the RC of the target vector to 1 - self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); + self.brillig_context.usize_const(target_vector.rc, 1_usize.into()); // Copy the elements to the left of the index self.brillig_context.copy_array_instruction( @@ -225,7 +225,7 @@ impl<'block> BrilligBlock<'block> { // Write the items to insert starting at the index for (subitem_index, variable) in items.iter().enumerate() { - let target_index = self.brillig_context.make_constant(subitem_index.into()); + let target_index = self.brillig_context.make_usize_constant(subitem_index.into()); self.brillig_context.memory_op(target_index, index, target_index, BinaryIntOp::Add); self.store_variable_in_array(target_vector.pointer, target_index, *variable); self.brillig_context.deallocate_register(target_index); @@ -240,7 +240,7 @@ impl<'block> BrilligBlock<'block> { &mut self, target_vector: BrilligVector, source_vector: BrilligVector, - index: RegisterIndex, + index: MemoryAddress, removed_items: &[BrilligVariable], ) { // First we need to allocate the target vector decrementing the size by removed_items.len() @@ -252,7 +252,7 @@ impl<'block> BrilligBlock<'block> { ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); // We initialize the RC of the target vector to 1 - self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); + self.brillig_context.usize_const(target_vector.rc, 1_usize.into()); // Copy the elements to the left of the index self.brillig_context.copy_array_instruction( @@ -298,7 +298,7 @@ impl<'block> BrilligBlock<'block> { // Get the removed items for (subitem_index, variable) in removed_items.iter().enumerate() { - let target_index = self.brillig_context.make_constant(subitem_index.into()); + let target_index = self.brillig_context.make_usize_constant(subitem_index.into()); self.brillig_context.memory_op(target_index, index, target_index, BinaryIntOp::Add); 
self.retrieve_variable_from_array(source_vector.pointer, target_index, *variable); self.brillig_context.deallocate_register(target_index); @@ -328,19 +328,18 @@ mod tests { use std::vec; use acvm::acir::brillig::Value; - use acvm::brillig_vm::brillig::RegisterIndex; use crate::brillig::brillig_gen::brillig_block::BrilligBlock; use crate::brillig::brillig_gen::brillig_block_variables::BlockVariables; use crate::brillig::brillig_gen::brillig_fn::FunctionContext; use crate::brillig::brillig_ir::artifact::BrilligParameter; use crate::brillig::brillig_ir::brillig_variable::{ - BrilligArray, BrilligVariable, BrilligVector, + BrilligArray, BrilligVariable, BrilligVector, SingleAddrVariable, }; use crate::brillig::brillig_ir::tests::{ create_and_run_vm, create_context, create_entry_point_bytecode, }; - use crate::brillig::brillig_ir::BrilligContext; + use crate::brillig::brillig_ir::{BrilligContext, BRILLIG_MEMORY_ADDRESSING_BIT_SIZE}; use crate::ssa::function_builder::FunctionBuilder; use crate::ssa::ir::function::RuntimeType; use crate::ssa::ir::map::Id; @@ -375,17 +374,20 @@ mod tests { fn test_case_push( push_back: bool, array: Vec, - expected_mem: Vec, item_to_push: Value, + expected_return: Vec, ) { let arguments = vec![ - BrilligParameter::Array(vec![BrilligParameter::Simple], array.len()), - BrilligParameter::Simple, - ]; - let returns = vec![ - BrilligParameter::Array(vec![BrilligParameter::Simple], array.len() + 1), - BrilligParameter::Simple, + BrilligParameter::Array( + vec![BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE)], + array.len(), + ), + BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE), ]; + let returns = vec![BrilligParameter::Array( + vec![BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE)], + array.len() + 1, + )]; let (_, mut function_context, mut context) = create_test_environment(); @@ -395,7 +397,10 @@ mod tests { size: array.len(), rc: context.allocate_register(), }; - let item_to_insert = context.allocate_register(); + let item_to_insert = SingleAddrVariable { + address: context.allocate_register(), + bit_size: BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, + }; // Cast the source array to a vector let source_vector = context.array_to_vector(&array_variable); @@ -413,65 +418,52 @@ mod tests { block.slice_push_back_operation( target_vector, source_vector, - &[BrilligVariable::Simple(item_to_insert)], + &[BrilligVariable::SingleAddr(item_to_insert)], ); } else { block.slice_push_front_operation( target_vector, source_vector, - &[BrilligVariable::Simple(item_to_insert)], + &[BrilligVariable::SingleAddr(item_to_insert)], ); } - context.return_instruction(&[ - target_vector.pointer, - target_vector.rc, - target_vector.size, - ]); + context.return_instruction(&[target_vector.pointer, target_vector.rc]); let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; - let vm = create_and_run_vm( - array.clone(), - vec![Value::from(0_usize), item_to_push], - &bytecode, + let (vm, return_data_offset, return_data_size) = + create_and_run_vm(array.into_iter().chain(vec![item_to_push]).collect(), &bytecode); + assert_eq!(return_data_size, expected_return.len()); + assert_eq!( + vm.get_memory()[return_data_offset..(return_data_offset + expected_return.len())], + expected_return ); - - assert_eq!(vm.get_memory(), &expected_mem); - - assert_eq!(vm.get_registers().get(RegisterIndex(0)), Value::from(array.len())); - assert_eq!(vm.get_registers().get(RegisterIndex(1)), Value::from(array.len() + 1)); } test_case_push( true, 
vec![Value::from(1_usize), Value::from(2_usize), Value::from(3_usize)], + Value::from(27_usize), vec![ - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), Value::from(1_usize), Value::from(2_usize), Value::from(3_usize), Value::from(27_usize), ], - Value::from(27_usize), ); - test_case_push(true, vec![], vec![Value::from(27_usize)], Value::from(27_usize)); + test_case_push(true, vec![], Value::from(27_usize), vec![Value::from(27_usize)]); test_case_push( false, vec![Value::from(1_usize), Value::from(2_usize), Value::from(3_usize)], + Value::from(27_usize), vec![ - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), Value::from(27_usize), Value::from(1_usize), Value::from(2_usize), Value::from(3_usize), ], - Value::from(27_usize), ); - test_case_push(false, vec![], vec![Value::from(27_usize)], Value::from(27_usize)); + test_case_push(false, vec![], Value::from(27_usize), vec![Value::from(27_usize)]); } #[test] @@ -479,15 +471,19 @@ mod tests { fn test_case_pop( pop_back: bool, array: Vec, - expected_mem: Vec, - expected_removed_item: Value, + expected_return_array: Vec, + expected_return_item: Value, ) { - let arguments = - vec![BrilligParameter::Array(vec![BrilligParameter::Simple], array.len())]; + let arguments = vec![BrilligParameter::Array( + vec![BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE)], + array.len(), + )]; let returns = vec![ - BrilligParameter::Array(vec![BrilligParameter::Simple], array.len() - 1), - BrilligParameter::Simple, - BrilligParameter::Simple, + BrilligParameter::Array( + vec![BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE)], + array.len() - 1, + ), + BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE), ]; let (_, mut function_context, mut context) = create_test_environment(); @@ -508,7 +504,10 @@ mod tests { size: context.allocate_register(), rc: context.allocate_register(), }; - let removed_item = context.allocate_register(); + let removed_item = SingleAddrVariable { + address: context.allocate_register(), + bit_size: BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, + }; let mut block = create_brillig_block(&mut function_context, &mut context); @@ -516,61 +515,46 @@ mod tests { block.slice_pop_back_operation( target_vector, source_vector, - &[BrilligVariable::Simple(removed_item)], + &[BrilligVariable::SingleAddr(removed_item)], ); } else { block.slice_pop_front_operation( target_vector, source_vector, - &[BrilligVariable::Simple(removed_item)], + &[BrilligVariable::SingleAddr(removed_item)], ); } context.return_instruction(&[ target_vector.pointer, target_vector.rc, - target_vector.size, - removed_item, + removed_item.address, ]); let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; - let vm = create_and_run_vm(array.clone(), vec![Value::from(0_usize)], &bytecode); - - assert_eq!(vm.get_memory(), &expected_mem); - - assert_eq!(vm.get_registers().get(RegisterIndex(0)), Value::from(array.len())); - assert_eq!(vm.get_registers().get(RegisterIndex(1)), Value::from(array.len() - 1)); - assert_eq!(vm.get_registers().get(RegisterIndex(2)), expected_removed_item); + let expected_return: Vec<_> = + expected_return_array.into_iter().chain(vec![expected_return_item]).collect(); + let (vm, return_data_offset, return_data_size) = + create_and_run_vm(array.clone(), &bytecode); + assert_eq!(return_data_size, expected_return.len()); + + assert_eq!( + vm.get_memory()[return_data_offset..(return_data_offset + expected_return.len())], + expected_return + ); } test_case_pop( 
true, vec![Value::from(1_usize), Value::from(2_usize), Value::from(3_usize)], - vec![ - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), - Value::from(1_usize), - Value::from(2_usize), - ], + vec![Value::from(1_usize), Value::from(2_usize)], Value::from(3_usize), ); - test_case_pop( - true, - vec![Value::from(1_usize)], - vec![Value::from(1_usize)], - Value::from(1_usize), - ); + test_case_pop(true, vec![Value::from(1_usize)], vec![], Value::from(1_usize)); test_case_pop( false, vec![Value::from(1_usize), Value::from(2_usize), Value::from(3_usize)], - vec![ - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), - Value::from(2_usize), - Value::from(3_usize), - ], + vec![Value::from(2_usize), Value::from(3_usize)], Value::from(1_usize), ); } @@ -579,19 +563,22 @@ mod tests { fn test_slice_insert_operation() { fn test_case_insert( array: Vec, - expected_mem: Vec, item: Value, index: Value, + expected_return: Vec, ) { let arguments = vec![ - BrilligParameter::Array(vec![BrilligParameter::Simple], array.len()), - BrilligParameter::Simple, - BrilligParameter::Simple, - ]; - let returns = vec![ - BrilligParameter::Array(vec![BrilligParameter::Simple], array.len() + 1), - BrilligParameter::Simple, + BrilligParameter::Array( + vec![BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE)], + array.len(), + ), + BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE), + BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE), ]; + let returns = vec![BrilligParameter::Array( + vec![BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE)], + array.len() + 1, + )]; let (_, mut function_context, mut context) = create_test_environment(); @@ -601,7 +588,10 @@ mod tests { size: array.len(), rc: context.allocate_register(), }; - let item_to_insert = context.allocate_register(); + let item_to_insert = SingleAddrVariable { + address: context.allocate_register(), + bit_size: BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, + }; let index_to_insert = context.allocate_register(); // Cast the source array to a vector @@ -620,90 +610,72 @@ mod tests { target_vector, source_vector, index_to_insert, - &[BrilligVariable::Simple(item_to_insert)], + &[BrilligVariable::SingleAddr(item_to_insert)], ); - context.return_instruction(&[ - target_vector.pointer, - target_vector.rc, - target_vector.size, - ]); + context.return_instruction(&[target_vector.pointer, target_vector.rc]); + let calldata = array.into_iter().chain(vec![item]).chain(vec![index]).collect(); let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; - let vm = create_and_run_vm( - array.clone(), - vec![Value::from(0_usize), item, index], - &bytecode, - ); - - assert_eq!(vm.get_memory(), &expected_mem); + let (vm, return_data_offset, return_data_size) = create_and_run_vm(calldata, &bytecode); + assert_eq!(return_data_size, expected_return.len()); - assert_eq!(vm.get_registers().get(RegisterIndex(0)), Value::from(array.len())); - assert_eq!(vm.get_registers().get(RegisterIndex(1)), Value::from(array.len() + 1)); + assert_eq!( + vm.get_memory()[return_data_offset..(return_data_offset + expected_return.len())], + expected_return + ); } test_case_insert( vec![Value::from(1_usize), Value::from(2_usize), Value::from(3_usize)], + Value::from(27_usize), + Value::from(1_usize), vec![ - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), Value::from(1_usize), Value::from(27_usize), Value::from(2_usize), Value::from(3_usize), ], - Value::from(27_usize), - 
Value::from(1_usize), ); test_case_insert( vec![Value::from(1_usize), Value::from(2_usize), Value::from(3_usize)], + Value::from(27_usize), + Value::from(0_usize), vec![ - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), Value::from(27_usize), Value::from(1_usize), Value::from(2_usize), Value::from(3_usize), ], - Value::from(27_usize), - Value::from(0_usize), ); test_case_insert( vec![Value::from(1_usize), Value::from(2_usize), Value::from(3_usize)], + Value::from(27_usize), + Value::from(2_usize), vec![ - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), Value::from(1_usize), Value::from(2_usize), Value::from(27_usize), Value::from(3_usize), ], - Value::from(27_usize), - Value::from(2_usize), ); test_case_insert( vec![Value::from(1_usize), Value::from(2_usize), Value::from(3_usize)], + Value::from(27_usize), + Value::from(3_usize), vec![ - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), Value::from(1_usize), Value::from(2_usize), Value::from(3_usize), Value::from(27_usize), ], - Value::from(27_usize), - Value::from(3_usize), ); test_case_insert( vec![], - vec![Value::from(27_usize)], Value::from(27_usize), Value::from(0_usize), + vec![Value::from(27_usize)], ); } @@ -711,18 +683,23 @@ mod tests { fn test_slice_remove_operation() { fn test_case_remove( array: Vec, - expected_mem: Vec, index: Value, + expected_array: Vec, expected_removed_item: Value, ) { let arguments = vec![ - BrilligParameter::Array(vec![BrilligParameter::Simple], array.len()), - BrilligParameter::Simple, + BrilligParameter::Array( + vec![BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE)], + array.len(), + ), + BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE), ]; let returns = vec![ - BrilligParameter::Array(vec![BrilligParameter::Simple], array.len() - 1), - BrilligParameter::Simple, - BrilligParameter::Simple, + BrilligParameter::Array( + vec![BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE)], + array.len() - 1, + ), + BrilligParameter::SingleAddr(BRILLIG_MEMORY_ADDRESSING_BIT_SIZE), ]; let (_, mut function_context, mut context) = create_test_environment(); @@ -744,7 +721,10 @@ mod tests { size: context.allocate_register(), rc: context.allocate_register(), }; - let removed_item = context.allocate_register(); + let removed_item = SingleAddrVariable { + address: context.allocate_register(), + bit_size: BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, + }; let mut block = create_brillig_block(&mut function_context, &mut context); @@ -752,68 +732,54 @@ mod tests { target_vector, source_vector, index_to_insert, - &[BrilligVariable::Simple(removed_item)], + &[BrilligVariable::SingleAddr(removed_item)], ); context.return_instruction(&[ target_vector.pointer, - target_vector.rc, target_vector.size, - removed_item, + removed_item.address, ]); + let calldata: Vec<_> = array.into_iter().chain(vec![index]).collect(); + let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; - let vm = create_and_run_vm(array.clone(), vec![Value::from(0_usize), index], &bytecode); + let (vm, return_data_offset, return_data_size) = create_and_run_vm(calldata, &bytecode); - assert_eq!(vm.get_memory(), &expected_mem); + let expected_return: Vec<_> = + expected_array.into_iter().chain(vec![expected_removed_item]).collect(); + assert_eq!(return_data_size, expected_return.len()); - assert_eq!(vm.get_registers().get(RegisterIndex(0)), Value::from(array.len())); - assert_eq!(vm.get_registers().get(RegisterIndex(1)), 
Value::from(array.len() - 1)); - assert_eq!(vm.get_registers().get(RegisterIndex(2)), expected_removed_item); + assert_eq!( + vm.get_memory()[return_data_offset..(return_data_offset + expected_return.len())], + expected_return + ); } test_case_remove( vec![Value::from(1_usize), Value::from(2_usize), Value::from(3_usize)], - vec![ - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), - Value::from(2_usize), - Value::from(3_usize), - ], Value::from(0_usize), + vec![Value::from(2_usize), Value::from(3_usize)], Value::from(1_usize), ); test_case_remove( vec![Value::from(1_usize), Value::from(2_usize), Value::from(3_usize)], - vec![ - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), - Value::from(1_usize), - Value::from(3_usize), - ], Value::from(1_usize), + vec![Value::from(1_usize), Value::from(3_usize)], Value::from(2_usize), ); test_case_remove( vec![Value::from(1_usize), Value::from(2_usize), Value::from(3_usize)], - vec![ - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), - Value::from(1_usize), - Value::from(2_usize), - ], Value::from(2_usize), + vec![Value::from(1_usize), Value::from(2_usize)], Value::from(3_usize), ); test_case_remove( - vec![Value::from(1_usize)], vec![Value::from(1_usize)], Value::from(0_usize), + vec![], Value::from(1_usize), ); } diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir.rs index 3e6c3d4d7de..f1a8f24ed03 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir.rs @@ -15,33 +15,24 @@ use crate::ssa::ir::dfg::CallStack; use self::{ artifact::{BrilligArtifact, UnresolvedJumpLocation}, - brillig_variable::{BrilligArray, BrilligVariable, BrilligVector}, + brillig_variable::{BrilligArray, BrilligVariable, BrilligVector, SingleAddrVariable}, registers::BrilligRegistersContext, }; use acvm::{ acir::brillig::{ - BinaryFieldOp, BinaryIntOp, BlackBoxOp, Opcode as BrilligOpcode, RegisterIndex, - RegisterOrMemory, Value, + BinaryFieldOp, BinaryIntOp, BlackBoxOp, MemoryAddress, Opcode as BrilligOpcode, Value, + ValueOrArray, }, + brillig_vm::brillig::HeapValueType, FieldElement, }; use debug_show::DebugShow; +use num_bigint::BigUint; -/// Integer arithmetic in Brillig is limited to 127 bit -/// integers. -/// -/// We could lift this in the future and have Brillig -/// do big integer arithmetic when it exceeds the field size -/// or we could have users re-implement big integer arithmetic -/// in Brillig. -/// Since constrained functions do not have this property, it -/// would mean that unconstrained functions will differ from -/// constrained functions in terms of syntax compatibility. -pub(crate) const BRILLIG_INTEGER_ARITHMETIC_BIT_SIZE: u32 = 127; /// The Brillig VM does not apply a limit to the memory address space, /// As a convention, we take use 64 bits. This means that we assume that /// memory has 2^64 memory slots. -pub(crate) const BRILLIG_MEMORY_ADDRESSING_BIT_SIZE: u32 = 64; +pub(crate) const BRILLIG_MEMORY_ADDRESSING_BIT_SIZE: u32 = 32; // Registers reserved in runtime for special purposes. pub(crate) enum ReservedRegisters { @@ -64,18 +55,18 @@ impl ReservedRegisters { } /// Returns the stack pointer register. This will get used to allocate memory in runtime. 
- pub(crate) fn stack_pointer() -> RegisterIndex { - RegisterIndex::from(ReservedRegisters::StackPointer as usize) + pub(crate) fn stack_pointer() -> MemoryAddress { + MemoryAddress::from(ReservedRegisters::StackPointer as usize) } /// Returns the previous stack pointer register. This will be used to restore the registers after a fn call. - pub(crate) fn previous_stack_pointer() -> RegisterIndex { - RegisterIndex::from(ReservedRegisters::PreviousStackPointer as usize) + pub(crate) fn previous_stack_pointer() -> MemoryAddress { + MemoryAddress::from(ReservedRegisters::PreviousStackPointer as usize) } /// Returns a user defined (non-reserved) register index. - fn user_register_index(index: usize) -> RegisterIndex { - RegisterIndex::from(index + ReservedRegisters::len()) + fn user_register_index(index: usize) -> MemoryAddress { + MemoryAddress::from(index + ReservedRegisters::len()) } } @@ -109,7 +100,7 @@ impl BrilligContext { } } - pub(crate) fn set_allocated_registers(&mut self, allocated_registers: Vec) { + pub(crate) fn set_allocated_registers(&mut self, allocated_registers: Vec) { self.registers = BrilligRegistersContext::from_preallocated_registers(allocated_registers); } @@ -127,27 +118,28 @@ impl BrilligContext { /// in `pointer_register` pub(crate) fn allocate_fixed_length_array( &mut self, - pointer_register: RegisterIndex, + pointer_register: MemoryAddress, size: usize, ) { // debug_show handled by allocate_array_instruction - let size_register = self.make_constant(size.into()); + let size_register = self.make_usize_constant(size.into()); self.allocate_array_instruction(pointer_register, size_register); + self.deallocate_register(size_register); } /// Allocates an array of size contained in size_register and stores the /// pointer to the array in `pointer_register` pub(crate) fn allocate_array_instruction( &mut self, - pointer_register: RegisterIndex, - size_register: RegisterIndex, + pointer_register: MemoryAddress, + size_register: MemoryAddress, ) { self.debug_show.allocate_array_instruction(pointer_register, size_register); self.set_array_pointer(pointer_register); self.update_stack_pointer(size_register); } - pub(crate) fn set_array_pointer(&mut self, pointer_register: RegisterIndex) { + pub(crate) fn set_array_pointer(&mut self, pointer_register: MemoryAddress) { self.debug_show.mov_instruction(pointer_register, ReservedRegisters::stack_pointer()); self.push_opcode(BrilligOpcode::Mov { destination: pointer_register, @@ -155,7 +147,7 @@ impl BrilligContext { }); } - pub(crate) fn update_stack_pointer(&mut self, size_register: RegisterIndex) { + pub(crate) fn update_stack_pointer(&mut self, size_register: MemoryAddress) { self.memory_op( ReservedRegisters::stack_pointer(), size_register, @@ -168,12 +160,12 @@ impl BrilligContext { /// pointer to the array in `pointer_register` fn allocate_variable_reference_instruction( &mut self, - pointer_register: RegisterIndex, + pointer_register: MemoryAddress, size: usize, ) { self.debug_show.allocate_instruction(pointer_register); // A variable can be stored in up to three values, so we reserve three values for that. 
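The allocation helpers here (`set_array_pointer` followed by `update_stack_pointer`, and this variable-reference path) all use the same bump-allocation scheme: return the current stack pointer, then advance it by the requested number of slots. A minimal model of that scheme, with illustrative names:

    fn bump_allocate(stack_pointer: &mut usize, size: usize) -> usize {
        let pointer = *stack_pointer; // address handed back to the caller
        *stack_pointer += size;       // reserve `size` memory slots
        pointer
    }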
- let size_register = self.make_constant(size.into()); + let size_register = self.make_usize_constant(size.into()); self.push_opcode(BrilligOpcode::Mov { destination: pointer_register, source: ReservedRegisters::stack_pointer(), @@ -184,16 +176,17 @@ impl BrilligContext { ReservedRegisters::stack_pointer(), BinaryIntOp::Add, ); + self.deallocate_register(size_register); } - pub(crate) fn allocate_simple_reference_instruction( + pub(crate) fn allocate_single_addr_reference_instruction( &mut self, - pointer_register: RegisterIndex, + pointer_register: MemoryAddress, ) { self.allocate_variable_reference_instruction(pointer_register, 1); } - pub(crate) fn allocate_array_reference_instruction(&mut self, pointer_register: RegisterIndex) { + pub(crate) fn allocate_array_reference_instruction(&mut self, pointer_register: MemoryAddress) { self.allocate_variable_reference_instruction( pointer_register, BrilligArray::registers_count(), @@ -202,7 +195,7 @@ impl BrilligContext { pub(crate) fn allocate_vector_reference_instruction( &mut self, - pointer_register: RegisterIndex, + pointer_register: MemoryAddress, ) { self.allocate_variable_reference_instruction( pointer_register, @@ -213,9 +206,9 @@ impl BrilligContext { /// Gets the value in the array at index `index` and stores it in `result` pub(crate) fn array_get( &mut self, - array_ptr: RegisterIndex, - index: RegisterIndex, - result: RegisterIndex, + array_ptr: MemoryAddress, + index: MemoryAddress, + result: MemoryAddress, ) { self.debug_show.array_get(array_ptr, index, result); // Computes array_ptr + index, ie array[index] @@ -235,9 +228,9 @@ impl BrilligContext { /// Sets the item in the array at index `index` to `value` pub(crate) fn array_set( &mut self, - array_ptr: RegisterIndex, - index: RegisterIndex, - value: RegisterIndex, + array_ptr: MemoryAddress, + index: MemoryAddress, + value: MemoryAddress, ) { self.debug_show.array_set(array_ptr, index, value); // Computes array_ptr + index, ie array[index] @@ -258,9 +251,9 @@ impl BrilligContext { /// Into the array pointed by destination pub(crate) fn copy_array_instruction( &mut self, - source_pointer: RegisterIndex, - destination_pointer: RegisterIndex, - num_elements_register: RegisterIndex, + source_pointer: MemoryAddress, + destination_pointer: MemoryAddress, + num_elements_register: MemoryAddress, ) { self.debug_show.copy_array_instruction( source_pointer, @@ -280,11 +273,11 @@ impl BrilligContext { /// This instruction will issue a loop that will iterate iteration_count times /// The body of the loop should be issued by the caller in the on_iteration closure. 
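A possible usage sketch for the `loop_instruction` helper documented here, in the spirit of `copy_array_instruction` above; it assumes crate-internal access to `BrilligContext` and that the three `MemoryAddress` arguments were allocated by the caller:

    fn copy_cells(
        ctx: &mut BrilligContext,
        source_ptr: MemoryAddress,
        destination_ptr: MemoryAddress,
        count: MemoryAddress,
    ) {
        let temp = ctx.allocate_register();
        ctx.loop_instruction(count, |ctx, iterator| {
            // temp = source[iterator]; destination[iterator] = temp
            ctx.array_get(source_ptr, iterator, temp);
            ctx.array_set(destination_ptr, iterator, temp);
        });
        ctx.deallocate_register(temp);
    }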
- pub(crate) fn loop_instruction(&mut self, iteration_count: RegisterIndex, on_iteration: F) + pub(crate) fn loop_instruction(&mut self, iteration_count: MemoryAddress, on_iteration: F) where - F: FnOnce(&mut BrilligContext, RegisterIndex), + F: FnOnce(&mut BrilligContext, MemoryAddress), { - let iterator_register = self.make_constant(0_u128.into()); + let iterator_register = self.make_usize_constant(0_u128.into()); let (loop_section, loop_label) = self.reserve_next_section_label(); self.enter_section(loop_section); @@ -292,18 +285,21 @@ impl BrilligContext { // Loop body // Check if iterator < iteration_count - let iterator_less_than_iterations = self.allocate_register(); + let iterator_less_than_iterations = + SingleAddrVariable { address: self.allocate_register(), bit_size: 1 }; + self.memory_op( iterator_register, iteration_count, - iterator_less_than_iterations, + iterator_less_than_iterations.address, BinaryIntOp::LessThan, ); let (exit_loop_section, exit_loop_label) = self.reserve_next_section_label(); - self.not_instruction(iterator_less_than_iterations, 1, iterator_less_than_iterations); - self.jump_if_instruction(iterator_less_than_iterations, exit_loop_label); + self.not_instruction(iterator_less_than_iterations, iterator_less_than_iterations); + + self.jump_if_instruction(iterator_less_than_iterations.address, exit_loop_label); // Call the on iteration function on_iteration(self, iterator_register); @@ -317,7 +313,7 @@ impl BrilligContext { self.enter_section(exit_loop_section); // Deallocate our temporary registers - self.deallocate_register(iterator_less_than_iterations); + self.deallocate_register(iterator_less_than_iterations.address); self.deallocate_register(iterator_register); } @@ -327,7 +323,7 @@ impl BrilligContext { /// functions to allow the given function to mutably alias its environment. pub(crate) fn branch_instruction( &mut self, - condition: RegisterIndex, + condition: MemoryAddress, mut f: impl FnMut(&mut BrilligContext, bool), ) { // Reserve 3 sections @@ -349,6 +345,21 @@ impl BrilligContext { self.enter_section(end_section); } + /// This instruction issues a branch that jumps over the code generated by the given function if the condition is truthy + pub(crate) fn if_not_instruction( + &mut self, + condition: MemoryAddress, + f: impl FnOnce(&mut BrilligContext), + ) { + let (end_section, end_label) = self.reserve_next_section_label(); + + self.jump_if_instruction(condition, end_label.clone()); + + f(self); + + self.enter_section(end_section); + } + /// Adds a label to the next opcode pub(crate) fn enter_context(&mut self, label: T) { self.debug_show.enter_context(label.to_string()); @@ -394,7 +405,7 @@ impl BrilligContext { /// Adds a unresolved `JumpIf` instruction to the bytecode. pub(crate) fn jump_if_instruction( &mut self, - condition: RegisterIndex, + condition: MemoryAddress, target_label: T, ) { self.debug_show.jump_if_instruction(condition, target_label.to_string()); @@ -414,14 +425,14 @@ impl BrilligContext { } /// Allocates an unused register. - pub(crate) fn allocate_register(&mut self) -> RegisterIndex { + pub(crate) fn allocate_register(&mut self) -> MemoryAddress { self.registers.allocate_register() } /// Push a register to the deallocation list, ready for reuse. /// TODO(AD): currently, register deallocation is only done with immediate values. 
/// TODO(AD): See https://github.com/noir-lang/noir/issues/1720 - pub(crate) fn deallocate_register(&mut self, register_index: RegisterIndex) { + pub(crate) fn deallocate_register(&mut self, register_index: MemoryAddress) { self.registers.deallocate_register(register_index); } } @@ -431,7 +442,7 @@ impl BrilligContext { /// is false. pub(crate) fn constrain_instruction( &mut self, - condition: RegisterIndex, + condition: MemoryAddress, assert_message: Option, ) { self.debug_show.constrain_instruction(condition); @@ -453,7 +464,7 @@ impl BrilligContext { /// Brillig does not have an explicit return instruction, so this /// method will move all register values to the first `N` values in /// the VM. - pub(crate) fn return_instruction(&mut self, return_registers: &[RegisterIndex]) { + pub(crate) fn return_instruction(&mut self, return_registers: &[MemoryAddress]) { self.debug_show.return_instruction(return_registers); let mut sources = Vec::with_capacity(return_registers.len()); let mut destinations = Vec::with_capacity(return_registers.len()); @@ -465,6 +476,9 @@ impl BrilligContext { sources.push(*return_register); destinations.push(destination_register); } + destinations + .iter() + .for_each(|destination| self.registers.ensure_register_is_allocated(*destination)); self.mov_registers_to_registers_instruction(sources, destinations); self.stop_instruction(); } @@ -473,8 +487,8 @@ impl BrilligContext { /// It first moves all sources to new allocated registers to avoid overwriting. pub(crate) fn mov_registers_to_registers_instruction( &mut self, - sources: Vec, - destinations: Vec, + sources: Vec, + destinations: Vec, ) { let new_sources: Vec<_> = sources .iter() @@ -493,23 +507,37 @@ impl BrilligContext { /// Emits a `mov` instruction. /// /// Copies the value at `source` into `destination` - pub(crate) fn mov_instruction(&mut self, destination: RegisterIndex, source: RegisterIndex) { + pub(crate) fn mov_instruction(&mut self, destination: MemoryAddress, source: MemoryAddress) { self.debug_show.mov_instruction(destination, source); self.push_opcode(BrilligOpcode::Mov { destination, source }); } + /// Cast truncates the value to the given bit size and converts the type of the value in memory to that bit size. + pub(crate) fn cast_instruction( + &mut self, + destination: SingleAddrVariable, + source: SingleAddrVariable, + ) { + self.debug_show.cast_instruction(destination.address, source.address, destination.bit_size); + self.push_opcode(BrilligOpcode::Cast { + destination: destination.address, + source: source.address, + bit_size: destination.bit_size, + }); + } + /// Processes a binary instruction according `operation`. /// /// This method will compute lhs rhs /// and store the result in the `result` register. 
pub(crate) fn binary_instruction( &mut self, - lhs: RegisterIndex, - rhs: RegisterIndex, - result: RegisterIndex, + lhs: MemoryAddress, + rhs: MemoryAddress, + result: MemoryAddress, operation: BrilligBinaryOp, ) { - self.debug_show.binary_instruction(lhs, rhs, result, operation.clone()); + self.debug_show.binary_instruction(lhs, rhs, result, operation); match operation { BrilligBinaryOp::Field { op } => { let opcode = BrilligOpcode::BinaryFieldOp { op, destination: result, lhs, rhs }; @@ -527,9 +555,18 @@ impl BrilligContext { } /// Stores the value of `constant` in the `result` register - pub(crate) fn const_instruction(&mut self, result: RegisterIndex, constant: Value) { + pub(crate) fn const_instruction( + &mut self, + result: MemoryAddress, + constant: Value, + bit_size: u32, + ) { self.debug_show.const_instruction(result, constant); - self.push_opcode(BrilligOpcode::Const { destination: result, value: constant }); + self.push_opcode(BrilligOpcode::Const { destination: result, value: constant, bit_size }); + } + + pub(crate) fn usize_const(&mut self, result: MemoryAddress, constant: Value) { + self.const_instruction(result, constant, BRILLIG_MEMORY_ADDRESSING_BIT_SIZE); } /// Processes a not instruction. @@ -538,21 +575,20 @@ impl BrilligContext { /// in Brillig. pub(crate) fn not_instruction( &mut self, - input: RegisterIndex, - bit_size: u32, - result: RegisterIndex, + input: SingleAddrVariable, + result: SingleAddrVariable, ) { - self.debug_show.not_instruction(input, bit_size, result); + self.debug_show.not_instruction(input.address, input.bit_size, result.address); // Compile !x as ((-1) - x) - let u_max = FieldElement::from(2_i128).pow(&FieldElement::from(bit_size as i128)) + let u_max = FieldElement::from(2_i128).pow(&FieldElement::from(input.bit_size as i128)) - FieldElement::one(); - let max = self.make_constant(Value::from(u_max)); + let max = self.make_constant(Value::from(u_max), input.bit_size); let opcode = BrilligOpcode::BinaryIntOp { - destination: result, + destination: result.address, op: BinaryIntOp::Sub, - bit_size, + bit_size: input.bit_size, lhs: max, - rhs: input, + rhs: input.address, }; self.push_opcode(opcode); self.deallocate_register(max); @@ -565,14 +601,20 @@ impl BrilligContext { pub(crate) fn foreign_call_instruction( &mut self, func_name: String, - inputs: &[RegisterOrMemory], - outputs: &[RegisterOrMemory], + inputs: &[ValueOrArray], + input_value_types: &[HeapValueType], + outputs: &[ValueOrArray], + output_value_types: &[HeapValueType], ) { + assert!(inputs.len() == input_value_types.len()); + assert!(outputs.len() == output_value_types.len()); self.debug_show.foreign_call_instruction(func_name.clone(), inputs, outputs); let opcode = BrilligOpcode::ForeignCall { function: func_name, destinations: outputs.to_vec(), + destination_value_types: output_value_types.to_vec(), inputs: inputs.to_vec(), + input_value_types: input_value_types.to_vec(), }; self.push_opcode(opcode); } @@ -580,8 +622,8 @@ impl BrilligContext { /// Emits a load instruction pub(crate) fn load_instruction( &mut self, - destination: RegisterIndex, - source_pointer: RegisterIndex, + destination: MemoryAddress, + source_pointer: MemoryAddress, ) { self.debug_show.load_instruction(destination, source_pointer); self.push_opcode(BrilligOpcode::Load { destination, source_pointer }); @@ -591,11 +633,11 @@ impl BrilligContext { pub(crate) fn load_variable_instruction( &mut self, destination: BrilligVariable, - variable_pointer: RegisterIndex, + variable_pointer: MemoryAddress, ) { match 
destination { - BrilligVariable::Simple(register_index) => { - self.load_instruction(register_index, variable_pointer); + BrilligVariable::SingleAddr(single_addr) => { + self.load_instruction(single_addr.address, variable_pointer); } BrilligVariable::BrilligArray(BrilligArray { pointer, size: _, rc }) => { self.load_instruction(pointer, variable_pointer); @@ -630,8 +672,8 @@ impl BrilligContext { /// Emits a store instruction pub(crate) fn store_instruction( &mut self, - destination_pointer: RegisterIndex, - source: RegisterIndex, + destination_pointer: MemoryAddress, + source: MemoryAddress, ) { self.debug_show.store_instruction(destination_pointer, source); self.push_opcode(BrilligOpcode::Store { destination_pointer, source }); @@ -640,17 +682,17 @@ impl BrilligContext { /// Stores a variable by saving its registers to memory pub(crate) fn store_variable_instruction( &mut self, - variable_pointer: RegisterIndex, + variable_pointer: MemoryAddress, source: BrilligVariable, ) { match source { - BrilligVariable::Simple(register_index) => { - self.store_instruction(variable_pointer, register_index); + BrilligVariable::SingleAddr(single_addr) => { + self.store_instruction(variable_pointer, single_addr.address); } BrilligVariable::BrilligArray(BrilligArray { pointer, size: _, rc }) => { self.store_instruction(variable_pointer, pointer); - let rc_pointer: RegisterIndex = self.allocate_register(); + let rc_pointer: MemoryAddress = self.allocate_register(); self.mov_instruction(rc_pointer, variable_pointer); self.usize_op_in_place(rc_pointer, BinaryIntOp::Add, 1_usize); self.store_instruction(rc_pointer, rc); @@ -664,7 +706,7 @@ impl BrilligContext { self.usize_op_in_place(size_pointer, BinaryIntOp::Add, 1_usize); self.store_instruction(size_pointer, size); - let rc_pointer: RegisterIndex = self.allocate_register(); + let rc_pointer: MemoryAddress = self.allocate_register(); self.mov_instruction(rc_pointer, variable_pointer); self.usize_op_in_place(rc_pointer, BinaryIntOp::Add, 2_usize); self.store_instruction(rc_pointer, rc); @@ -685,43 +727,55 @@ impl BrilligContext { /// For Brillig, all integer operations will overflow as its cheap. 
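// Illustrative sketch (not part of the diff): `not_instruction` above compiles `!x` for a
// `bit_size`-wide unsigned value as (2^bit_size - 1) - x. Subtracting from the all-ones
// constant flips every bit without borrows, so the result matches bitwise NOT. Plain-Rust check:
fn brillig_style_not(x: u128, bit_size: u32) -> u128 {
    let u_max = (1u128 << bit_size) - 1; // the constant the IR materializes with make_constant
    u_max - x
}

fn main() {
    assert_eq!(brillig_style_not(0b1010, 4), 0b0101);
    assert_eq!(brillig_style_not(0xFF, 8), 0x00);
    // Agrees with the native operator at the same width.
    assert_eq!(brillig_style_not(0xAB, 8), (!0xAB_u8) as u128);
}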
pub(crate) fn truncate_instruction( &mut self, - destination_of_truncated_value: RegisterIndex, - value_to_truncate: RegisterIndex, + destination_of_truncated_value: SingleAddrVariable, + value_to_truncate: SingleAddrVariable, bit_size: u32, ) { self.debug_show.truncate_instruction( - destination_of_truncated_value, - value_to_truncate, + destination_of_truncated_value.address, + value_to_truncate.address, bit_size, ); assert!( - bit_size <= BRILLIG_INTEGER_ARITHMETIC_BIT_SIZE, - "tried to truncate to a bit size greater than allowed {bit_size}" + bit_size <= value_to_truncate.bit_size, + "tried to truncate to a bit size {} greater than the variable size {}", + bit_size, + value_to_truncate.bit_size + ); + + let mask = BigUint::from(2_u32).pow(bit_size) - BigUint::from(1_u32); + let mask_constant = self.make_constant( + FieldElement::from_be_bytes_reduce(&mask.to_bytes_be()).into(), + value_to_truncate.bit_size, ); - // The brillig VM performs all arithmetic operations modulo 2**bit_size - // So to truncate any value to a target bit size we can just issue a no-op arithmetic operation - // With bit size equal to target_bit_size - let zero_register = self.make_constant(Value::from(FieldElement::zero())); self.binary_instruction( - value_to_truncate, - zero_register, - destination_of_truncated_value, - BrilligBinaryOp::Integer { op: BinaryIntOp::Add, bit_size }, + value_to_truncate.address, + mask_constant, + destination_of_truncated_value.address, + BrilligBinaryOp::Integer { op: BinaryIntOp::And, bit_size: value_to_truncate.bit_size }, ); - self.deallocate_register(zero_register); + + self.deallocate_register(mask_constant); } /// Emits a stop instruction pub(crate) fn stop_instruction(&mut self) { self.debug_show.stop_instruction(); - self.push_opcode(BrilligOpcode::Stop); + self.push_opcode(BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 }); } /// Returns a register which holds the value of a constant - pub(crate) fn make_constant(&mut self, constant: Value) -> RegisterIndex { + pub(crate) fn make_constant(&mut self, constant: Value, bit_size: u32) -> MemoryAddress { + let register = self.allocate_register(); + self.const_instruction(register, constant, bit_size); + register + } + + /// Returns a register which holds the value of an usize constant + pub(crate) fn make_usize_constant(&mut self, constant: Value) -> MemoryAddress { let register = self.allocate_register(); - self.const_instruction(register, constant); + self.usize_const(register, constant); register } @@ -736,9 +790,9 @@ impl BrilligContext { /// to other binary instructions. pub(crate) fn modulo_instruction( &mut self, - result_register: RegisterIndex, - left: RegisterIndex, - right: RegisterIndex, + result_register: MemoryAddress, + left: MemoryAddress, + right: MemoryAddress, bit_size: u32, signed: bool, ) { @@ -791,12 +845,12 @@ impl BrilligContext { } /// Returns the i'th register after the reserved ones - pub(crate) fn register(&self, i: usize) -> RegisterIndex { - RegisterIndex::from(ReservedRegisters::NUM_RESERVED_REGISTERS + i) + pub(crate) fn register(&self, i: usize) -> MemoryAddress { + MemoryAddress::from(ReservedRegisters::NUM_RESERVED_REGISTERS + i) } /// Saves all of the registers that have been used up until this point. - fn save_registers_of_vars(&mut self, vars: &[BrilligVariable]) -> Vec { + fn save_registers_of_vars(&mut self, vars: &[BrilligVariable]) -> Vec { // Save all of the used registers at this point in memory // because the function call will/may overwrite them. 
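// Illustrative sketch (not part of the diff): the reworked `truncate_instruction` above
// masks with 2^bit_size - 1 (a binary AND) instead of relying on wrap-around arithmetic.
// A plain-Rust model of that mask:
fn truncate(value: u128, bit_size: u32) -> u128 {
    let mask = (1u128 << bit_size) - 1; // same constant the IR builds from a BigUint
    value & mask
}

fn main() {
    assert_eq!(truncate(300, 8), 44); // 0b1_0010_1100 -> 0b0010_1100
    assert_eq!(truncate(u64::MAX as u128, 32), u32::MAX as u128);
    assert_eq!(truncate(5, 8), 5); // values already in range are unchanged
}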
// @@ -822,7 +876,7 @@ impl BrilligContext { } /// Loads all of the registers that have been save by save_all_used_registers. - fn load_all_saved_registers(&mut self, used_registers: &[RegisterIndex]) { + fn load_all_saved_registers(&mut self, used_registers: &[MemoryAddress]) { // Load all of the used registers that we saved. // We do all the reverse operations of save_all_used_registers. // Iterate our registers in reverse @@ -839,7 +893,7 @@ impl BrilligContext { /// Utility method to perform a binary instruction with a constant value in place pub(crate) fn usize_op_in_place( &mut self, - destination: RegisterIndex, + destination: MemoryAddress, op: BinaryIntOp, constant: usize, ) { @@ -849,12 +903,12 @@ impl BrilligContext { /// Utility method to perform a binary instruction with a constant value pub(crate) fn usize_op( &mut self, - operand: RegisterIndex, - destination: RegisterIndex, + operand: MemoryAddress, + destination: MemoryAddress, op: BinaryIntOp, constant: usize, ) { - let const_register = self.make_constant(Value::from(constant)); + let const_register = self.make_usize_constant(Value::from(constant)); self.memory_op(operand, const_register, destination, op); // Mark as no longer used for this purpose, frees for reuse self.deallocate_register(const_register); @@ -863,9 +917,9 @@ impl BrilligContext { /// Utility method to perform a binary instruction with a memory address pub(crate) fn memory_op( &mut self, - lhs: RegisterIndex, - rhs: RegisterIndex, - destination: RegisterIndex, + lhs: MemoryAddress, + rhs: MemoryAddress, + destination: MemoryAddress, op: BinaryIntOp, ) { self.binary_instruction( @@ -881,9 +935,9 @@ impl BrilligContext { // Move argument values to the front of the register indices. pub(crate) fn pre_call_save_registers_prep_args( &mut self, - arguments: &[RegisterIndex], + arguments: &[MemoryAddress], variables_to_save: &[BrilligVariable], - ) -> Vec { + ) -> Vec { // Save all the registers we have used to the stack. let saved_registers = self.save_registers_of_vars(variables_to_save); @@ -891,8 +945,11 @@ impl BrilligContext { // // This means that the arguments will be in the first `n` registers after // the number of reserved registers. - let (sources, destinations) = + let (sources, destinations): (Vec<_>, Vec<_>) = arguments.iter().enumerate().map(|(i, argument)| (*argument, self.register(i))).unzip(); + destinations + .iter() + .for_each(|destination| self.registers.ensure_register_is_allocated(*destination)); self.mov_registers_to_registers_instruction(sources, destinations); saved_registers } @@ -902,16 +959,17 @@ impl BrilligContext { // Load all the registers we have previous saved in save_registers_prep_args. pub(crate) fn post_call_prep_returns_load_registers( &mut self, - result_registers: &[RegisterIndex], - saved_registers: &[RegisterIndex], + result_registers: &[MemoryAddress], + saved_registers: &[MemoryAddress], ) { // Allocate our result registers and write into them // We assume the return values of our call are held in 0..num results register indices - let (sources, destinations) = result_registers + let (sources, destinations): (Vec<_>, Vec<_>) = result_registers .iter() .enumerate() .map(|(i, result_register)| (self.register(i), *result_register)) .unzip(); + sources.iter().for_each(|source| self.registers.ensure_register_is_allocated(*source)); self.mov_registers_to_registers_instruction(sources, destinations); // Restore all the same registers we have, in exact reverse order. 
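// Illustrative sketch (not part of the diff, names invented): models the call convention
// implemented by `pre_call_save_registers_prep_args` and
// `post_call_prep_returns_load_registers` above. Arguments are moved into the first user
// slots after the reserved registers before a call, and results are read back from those
// same slots afterwards. NUM_RESERVED is assumed to be 2 here purely for illustration;
// the real value comes from ReservedRegisters.
const NUM_RESERVED: usize = 2;

fn argument_slot(i: usize) -> usize {
    NUM_RESERVED + i // mirrors `BrilligContext::register(i)`
}

fn main() {
    let mut memory = vec![0u64; 8];

    // Pre-call: place the arguments 7 and 9 into the convention slots.
    for (i, arg) in [7u64, 9u64].iter().enumerate() {
        memory[argument_slot(i)] = *arg;
    }

    // "Callee": leaves its single result in the first user slot.
    memory[argument_slot(0)] = memory[argument_slot(0)] + memory[argument_slot(1)];

    // Post-call: the caller copies the result out of the convention slot.
    assert_eq!(memory[argument_slot(0)], 16);
}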
@@ -924,13 +982,13 @@ impl BrilligContext { /// Utility method to transform a HeapArray to a HeapVector by making a runtime constant with the size. pub(crate) fn array_to_vector(&mut self, array: &BrilligArray) -> BrilligVector { - let size_register = self.make_constant(array.size.into()); + let size_register = self.make_usize_constant(array.size.into()); BrilligVector { size: size_register, pointer: array.pointer, rc: array.rc } } /// Issues a blackbox operation. pub(crate) fn black_box_op_instruction(&mut self, op: BlackBoxOp) { - self.debug_show.black_box_op_instruction(op); + self.debug_show.black_box_op_instruction(&op); self.push_opcode(BrilligOpcode::BlackBox(op)); } @@ -938,20 +996,20 @@ impl BrilligContext { /// And the radix register limb_count times to the target vector. pub(crate) fn radix_instruction( &mut self, - source: RegisterIndex, + source: MemoryAddress, target_vector: BrilligVector, - radix: RegisterIndex, - limb_count: RegisterIndex, + radix: MemoryAddress, + limb_count: MemoryAddress, big_endian: bool, ) { self.mov_instruction(target_vector.size, limb_count); - self.const_instruction(target_vector.rc, 1_usize.into()); + self.usize_const(target_vector.rc, 1_usize.into()); self.allocate_array_instruction(target_vector.pointer, target_vector.size); let shifted_register = self.allocate_register(); self.mov_instruction(shifted_register, source); - let modulus_register: RegisterIndex = self.allocate_register(); + let modulus_register: MemoryAddress = self.allocate_register(); self.loop_instruction(target_vector.size, |ctx, iterator_register| { // Compute the modulus @@ -1028,7 +1086,7 @@ impl BrilligContext { } /// Type to encapsulate the binary operation types in Brillig -#[derive(Clone)] +#[derive(Clone, Copy)] pub(crate) enum BrilligBinaryOp { Field { op: BinaryFieldOp }, Integer { op: BinaryIntOp, bit_size: u32 }, @@ -1042,10 +1100,11 @@ pub(crate) mod tests { use std::vec; use acvm::acir::brillig::{ - BinaryIntOp, ForeignCallParam, ForeignCallResult, HeapVector, RegisterIndex, - RegisterOrMemory, Value, + BinaryIntOp, ForeignCallParam, ForeignCallResult, HeapVector, MemoryAddress, Value, + ValueOrArray, }; - use acvm::brillig_vm::{Registers, VMStatus, VM}; + use acvm::brillig_vm::brillig::HeapValueType; + use acvm::brillig_vm::{VMStatus, VM}; use acvm::{BlackBoxFunctionSolver, BlackBoxResolutionError, FieldElement}; use crate::brillig::brillig_ir::BrilligContext; @@ -1096,6 +1155,14 @@ pub(crate) mod tests { ) -> Result<(FieldElement, FieldElement), BlackBoxResolutionError> { panic!("Path not trodden by this test") } + + fn poseidon2_permutation( + &self, + _inputs: &[FieldElement], + _len: u32, + ) -> Result, BlackBoxResolutionError> { + Ok(vec![0_u128.into(), 1_u128.into(), 2_u128.into(), 3_u128.into()]) + } } pub(crate) fn create_context() -> BrilligContext { @@ -1117,21 +1184,17 @@ pub(crate) mod tests { } pub(crate) fn create_and_run_vm( - memory: Vec, - param_registers: Vec, + calldata: Vec, bytecode: &[BrilligOpcode], - ) -> VM<'_, DummyBlackBoxSolver> { - let mut vm = VM::new( - Registers { inner: param_registers }, - memory, - bytecode, - vec![], - &DummyBlackBoxSolver, - ); + ) -> (VM<'_, DummyBlackBoxSolver>, usize, usize) { + let mut vm = VM::new(calldata, bytecode, vec![], &DummyBlackBoxSolver); let status = vm.process_opcodes(); - assert_eq!(status, VMStatus::Finished); - vm + if let VMStatus::Finished { return_data_offset, return_data_size } = status { + (vm, return_data_offset, return_data_size) + } else { + panic!("VM did not finish") + } } /// Test a 
Brillig foreign call returning a vector @@ -1150,18 +1213,20 @@ pub(crate) mod tests { let mut context = BrilligContext::new(true); let r_stack = ReservedRegisters::stack_pointer(); // Start stack pointer at 0 - context.const_instruction(r_stack, Value::from(0_usize)); - let r_input_size = RegisterIndex::from(ReservedRegisters::len()); - let r_array_ptr = RegisterIndex::from(ReservedRegisters::len() + 1); - let r_output_size = RegisterIndex::from(ReservedRegisters::len() + 2); - let r_equality = RegisterIndex::from(ReservedRegisters::len() + 3); - context.const_instruction(r_input_size, Value::from(12_usize)); + context.usize_const(r_stack, Value::from(ReservedRegisters::len() + 3)); + let r_input_size = MemoryAddress::from(ReservedRegisters::len()); + let r_array_ptr = MemoryAddress::from(ReservedRegisters::len() + 1); + let r_output_size = MemoryAddress::from(ReservedRegisters::len() + 2); + let r_equality = MemoryAddress::from(ReservedRegisters::len() + 3); + context.usize_const(r_input_size, Value::from(12_usize)); // copy our stack frame to r_array_ptr context.mov_instruction(r_array_ptr, r_stack); context.foreign_call_instruction( "make_number_sequence".into(), - &[RegisterOrMemory::RegisterIndex(r_input_size)], - &[RegisterOrMemory::HeapVector(HeapVector { pointer: r_stack, size: r_output_size })], + &[ValueOrArray::MemoryAddress(r_input_size)], + &[HeapValueType::Simple], + &[ValueOrArray::HeapVector(HeapVector { pointer: r_stack, size: r_output_size })], + &[HeapValueType::Vector { value_types: vec![HeapValueType::Simple] }], ); // push stack frame by r_returned_size context.memory_op(r_stack, r_output_size, r_stack, BinaryIntOp::Add); @@ -1178,13 +1243,12 @@ pub(crate) mod tests { let bytecode = context.artifact().finish().byte_code; let number_sequence: Vec = (0_usize..12_usize).map(Value::from).collect(); let mut vm = VM::new( - Registers { inner: vec![] }, vec![], &bytecode, vec![ForeignCallResult { values: vec![ForeignCallParam::Array(number_sequence)] }], &DummyBlackBoxSolver, ); let status = vm.process_opcodes(); - assert_eq!(status, VMStatus::Finished); + assert_eq!(status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); } } diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs index 9b8c3913123..d10dcf13d9f 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs @@ -6,8 +6,11 @@ use crate::ssa::ir::dfg::CallStack; /// Represents a parameter or a return value of a function. #[derive(Debug, Clone)] pub(crate) enum BrilligParameter { - Simple, + /// A single address parameter or return value. Holds the bit size of the parameter. + SingleAddr(u32), + /// An array parameter or return value. Holds the type of an array item and its size. Array(Vec, usize), + /// A slice parameter or return value. Holds the type of a slice item. Slice(Vec), } @@ -97,7 +100,7 @@ impl BrilligArtifact { // Replace STOP with RETURN because this is not the end of the program now. let stop_position = byte_code .iter() - .position(|opcode| matches!(opcode, BrilligOpcode::Stop)) + .position(|opcode| matches!(opcode, BrilligOpcode::Stop { .. 
})) .expect("Trying to link with a function that does not have a stop opcode"); byte_code[stop_position] = BrilligOpcode::Return; diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/brillig_variable.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/brillig_variable.rs index 46c54d55ecb..48ad3c5bae4 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/brillig_variable.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/brillig_variable.rs @@ -1,12 +1,22 @@ -use acvm::brillig_vm::brillig::{HeapArray, HeapVector, RegisterIndex, RegisterOrMemory}; +use acvm::brillig_vm::brillig::{ + HeapArray, HeapValueType, HeapVector, MemoryAddress, ValueOrArray, +}; use serde::{Deserialize, Serialize}; +use crate::ssa::ir::types::Type; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] +pub(crate) struct SingleAddrVariable { + pub(crate) address: MemoryAddress, + pub(crate) bit_size: u32, +} + /// The representation of a noir array in the Brillig IR #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] pub(crate) struct BrilligArray { - pub(crate) pointer: RegisterIndex, + pub(crate) pointer: MemoryAddress, pub(crate) size: usize, - pub(crate) rc: RegisterIndex, + pub(crate) rc: MemoryAddress, } impl BrilligArray { @@ -18,7 +28,7 @@ impl BrilligArray { 2 } - pub(crate) fn extract_registers(self) -> Vec { + pub(crate) fn extract_registers(self) -> Vec { vec![self.pointer, self.rc] } } @@ -26,9 +36,9 @@ impl BrilligArray { /// The representation of a noir slice in the Brillig IR #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] pub(crate) struct BrilligVector { - pub(crate) pointer: RegisterIndex, - pub(crate) size: RegisterIndex, - pub(crate) rc: RegisterIndex, + pub(crate) pointer: MemoryAddress, + pub(crate) size: MemoryAddress, + pub(crate) rc: MemoryAddress, } impl BrilligVector { @@ -40,7 +50,7 @@ impl BrilligVector { 3 } - pub(crate) fn extract_registers(self) -> Vec { + pub(crate) fn extract_registers(self) -> Vec { vec![self.pointer, self.size, self.rc] } } @@ -48,15 +58,15 @@ impl BrilligVector { /// The representation of a noir value in the Brillig IR #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] pub(crate) enum BrilligVariable { - Simple(RegisterIndex), + SingleAddr(SingleAddrVariable), BrilligArray(BrilligArray), BrilligVector(BrilligVector), } impl BrilligVariable { - pub(crate) fn extract_register(self) -> RegisterIndex { + pub(crate) fn extract_single_addr(self) -> SingleAddrVariable { match self { - BrilligVariable::Simple(register_index) => register_index, + BrilligVariable::SingleAddr(single_addr) => single_addr, _ => unreachable!("ICE: Expected register, got {self:?}"), } } @@ -75,25 +85,36 @@ impl BrilligVariable { } } - pub(crate) fn extract_registers(self) -> Vec { + pub(crate) fn extract_registers(self) -> Vec { match self { - BrilligVariable::Simple(register_index) => vec![register_index], + BrilligVariable::SingleAddr(single_addr) => vec![single_addr.address], BrilligVariable::BrilligArray(array) => array.extract_registers(), BrilligVariable::BrilligVector(vector) => vector.extract_registers(), } } - pub(crate) fn to_register_or_memory(self) -> RegisterOrMemory { + pub(crate) fn to_value_or_array(self) -> ValueOrArray { match self { - BrilligVariable::Simple(register_index) => { - RegisterOrMemory::RegisterIndex(register_index) - } - BrilligVariable::BrilligArray(array) => { - RegisterOrMemory::HeapArray(array.to_heap_array()) + BrilligVariable::SingleAddr(single_addr) => { + 
ValueOrArray::MemoryAddress(single_addr.address) } + BrilligVariable::BrilligArray(array) => ValueOrArray::HeapArray(array.to_heap_array()), BrilligVariable::BrilligVector(vector) => { - RegisterOrMemory::HeapVector(vector.to_heap_vector()) + ValueOrArray::HeapVector(vector.to_heap_vector()) } } } } + +pub(crate) fn type_to_heap_value_type(typ: &Type) -> HeapValueType { + match typ { + Type::Numeric(_) | Type::Reference(_) | Type::Function => HeapValueType::Simple, + Type::Array(elem_type, size) => HeapValueType::Array { + value_types: elem_type.as_ref().iter().map(type_to_heap_value_type).collect(), + size: typ.element_size() * size, + }, + Type::Slice(elem_type) => HeapValueType::Vector { + value_types: elem_type.as_ref().iter().map(type_to_heap_value_type).collect(), + }, + } +} diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs index a8563dc9efe..dd57f0c4426 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs @@ -3,8 +3,8 @@ use super::BrilligBinaryOp; use crate::brillig::brillig_ir::{ReservedRegisters, BRILLIG_MEMORY_ADDRESSING_BIT_SIZE}; use acvm::acir::brillig::{ - BinaryFieldOp, BinaryIntOp, BlackBoxOp, HeapArray, HeapVector, RegisterIndex, RegisterOrMemory, - Value, + BinaryFieldOp, BinaryIntOp, BlackBoxOp, HeapArray, HeapVector, MemoryAddress, Value, + ValueOrArray, }; /// Trait for converting values into debug-friendly strings. @@ -24,7 +24,7 @@ macro_rules! default_to_string_impl { default_to_string_impl! { str usize u32 } -impl DebugToString for RegisterIndex { +impl DebugToString for MemoryAddress { fn debug_to_string(&self) -> String { if *self == ReservedRegisters::stack_pointer() { "Stack".into() @@ -74,9 +74,8 @@ impl DebugToString for BinaryIntOp { BinaryIntOp::And => "&&".into(), BinaryIntOp::Or => "||".into(), BinaryIntOp::Xor => "^".into(), - BinaryIntOp::Shl | BinaryIntOp::Shr => { - unreachable!("bit shift should have been replaced") - } + BinaryIntOp::Shl => "<<".into(), + BinaryIntOp::Shr => ">>".into(), } } } @@ -112,12 +111,12 @@ impl DebugToString for Value { } } -impl DebugToString for RegisterOrMemory { +impl DebugToString for ValueOrArray { fn debug_to_string(&self) -> String { match self { - RegisterOrMemory::RegisterIndex(index) => index.debug_to_string(), - RegisterOrMemory::HeapArray(heap_array) => heap_array.debug_to_string(), - RegisterOrMemory::HeapVector(vector) => vector.debug_to_string(), + ValueOrArray::MemoryAddress(index) => index.debug_to_string(), + ValueOrArray::HeapArray(heap_array) => heap_array.debug_to_string(), + ValueOrArray::HeapVector(vector) => vector.debug_to_string(), } } } @@ -152,15 +151,15 @@ impl DebugShow { /// Emits brillig bytecode to jump to a trap condition if `condition` /// is false. - pub(crate) fn constrain_instruction(&self, condition: RegisterIndex) { + pub(crate) fn constrain_instruction(&self, condition: MemoryAddress) { debug_println!(self.enable_debug_trace, " ASSERT {} != 0", condition); } /// Processes a return instruction. - pub(crate) fn return_instruction(&self, return_registers: &[RegisterIndex]) { + pub(crate) fn return_instruction(&self, return_registers: &[MemoryAddress]) { let registers_string = return_registers .iter() - .map(RegisterIndex::debug_to_string) + .map(MemoryAddress::debug_to_string) .collect::>() .join(", "); @@ -168,32 +167,48 @@ impl DebugShow { } /// Emits a `mov` instruction. 
- pub(crate) fn mov_instruction(&self, destination: RegisterIndex, source: RegisterIndex) { + pub(crate) fn mov_instruction(&self, destination: MemoryAddress, source: MemoryAddress) { debug_println!(self.enable_debug_trace, " MOV {}, {}", destination, source); } + /// Emits a `cast` instruction. + pub(crate) fn cast_instruction( + &self, + destination: MemoryAddress, + source: MemoryAddress, + bit_size: u32, + ) { + debug_println!( + self.enable_debug_trace, + " CAST {}, {} as u{}", + destination, + source, + bit_size + ); + } + /// Processes a binary instruction according `operation`. pub(crate) fn binary_instruction( &self, - lhs: RegisterIndex, - rhs: RegisterIndex, - result: RegisterIndex, + lhs: MemoryAddress, + rhs: MemoryAddress, + result: MemoryAddress, operation: BrilligBinaryOp, ) { debug_println!(self.enable_debug_trace, " {} = {} {} {}", result, lhs, operation, rhs); } /// Stores the value of `constant` in the `result` register - pub(crate) fn const_instruction(&self, result: RegisterIndex, constant: Value) { + pub(crate) fn const_instruction(&self, result: MemoryAddress, constant: Value) { debug_println!(self.enable_debug_trace, " CONST {} = {}", result, constant); } /// Processes a not instruction. Append with "_" as this is a high-level instruction. pub(crate) fn not_instruction( &self, - condition: RegisterIndex, + condition: MemoryAddress, bit_size: u32, - result: RegisterIndex, + result: MemoryAddress, ) { debug_println!(self.enable_debug_trace, " i{}_NOT {} = !{}", bit_size, result, condition); } @@ -202,8 +217,8 @@ impl DebugShow { pub(crate) fn foreign_call_instruction( &self, func_name: String, - inputs: &[RegisterOrMemory], - outputs: &[RegisterOrMemory], + inputs: &[ValueOrArray], + outputs: &[ValueOrArray], ) { debug_println!( self.enable_debug_trace, @@ -217,8 +232,8 @@ impl DebugShow { /// Emits a load instruction pub(crate) fn load_instruction( &self, - destination: RegisterIndex, - source_pointer: RegisterIndex, + destination: MemoryAddress, + source_pointer: MemoryAddress, ) { debug_println!(self.enable_debug_trace, " LOAD {} = *{}", destination, source_pointer); } @@ -226,8 +241,8 @@ impl DebugShow { /// Emits a store instruction pub(crate) fn store_instruction( &self, - destination_pointer: RegisterIndex, - source: RegisterIndex, + destination_pointer: MemoryAddress, + source: MemoryAddress, ) { debug_println!(self.enable_debug_trace, " STORE *{} = {}", destination_pointer, source); } @@ -240,8 +255,8 @@ impl DebugShow { /// Debug function for allocate_array_instruction pub(crate) fn allocate_array_instruction( &self, - pointer_register: RegisterIndex, - size_register: RegisterIndex, + pointer_register: MemoryAddress, + size_register: MemoryAddress, ) { debug_println!( self.enable_debug_trace, @@ -252,16 +267,16 @@ impl DebugShow { } /// Debug function for allocate_instruction - pub(crate) fn allocate_instruction(&self, pointer_register: RegisterIndex) { + pub(crate) fn allocate_instruction(&self, pointer_register: MemoryAddress) { debug_println!(self.enable_debug_trace, " ALLOCATE {} ", pointer_register); } /// Debug function for array_get pub(crate) fn array_get( &self, - array_ptr: RegisterIndex, - index: RegisterIndex, - result: RegisterIndex, + array_ptr: MemoryAddress, + index: MemoryAddress, + result: MemoryAddress, ) { debug_println!( self.enable_debug_trace, @@ -275,9 +290,9 @@ impl DebugShow { /// Debug function for array_set pub(crate) fn array_set( &self, - array_ptr: RegisterIndex, - index: RegisterIndex, - value: RegisterIndex, + array_ptr: 
MemoryAddress, + index: MemoryAddress, + value: MemoryAddress, ) { debug_println!(self.enable_debug_trace, " ARRAY_SET {}[{}] = {}", array_ptr, index, value); } @@ -285,9 +300,9 @@ impl DebugShow { /// Debug function for copy_array_instruction pub(crate) fn copy_array_instruction( &self, - source: RegisterIndex, - destination: RegisterIndex, - num_elements_register: RegisterIndex, + source: MemoryAddress, + destination: MemoryAddress, + num_elements_register: MemoryAddress, ) { debug_println!( self.enable_debug_trace, @@ -314,7 +329,7 @@ impl DebugShow { /// Debug function for jump_if_instruction pub(crate) fn jump_if_instruction( &self, - condition: RegisterIndex, + condition: MemoryAddress, target_label: T, ) { debug_println!( @@ -328,8 +343,8 @@ impl DebugShow { /// Debug function for cast_instruction pub(crate) fn truncate_instruction( &self, - destination: RegisterIndex, - source: RegisterIndex, + destination: MemoryAddress, + source: MemoryAddress, target_bit_size: u32, ) { debug_println!( @@ -342,7 +357,7 @@ impl DebugShow { } /// Debug function for black_box_op - pub(crate) fn black_box_op_instruction(&self, op: BlackBoxOp) { + pub(crate) fn black_box_op_instruction(&self, op: &BlackBoxOp) { match op { BlackBoxOp::Sha256 { message, output } => { debug_println!(self.enable_debug_trace, " SHA256 {} -> {}", message, output); @@ -457,7 +472,7 @@ impl DebugShow { output ); } - BlackBoxOp::BigIntNeg { lhs, rhs, output } => { + BlackBoxOp::BigIntSub { lhs, rhs, output } => { debug_println!( self.enable_debug_trace, " BIGINT_NEG {} {} -> {}", diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs index 36ca414f38e..9d186f9bc60 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs @@ -1,13 +1,16 @@ -use crate::brillig::brillig_ir::ReservedRegisters; - use super::{ artifact::{BrilligArtifact, BrilligParameter}, - brillig_variable::{BrilligArray, BrilligVariable}, + brillig_variable::{BrilligArray, BrilligVariable, SingleAddrVariable}, debug_show::DebugShow, registers::BrilligRegistersContext, - BrilligContext, + BrilligContext, ReservedRegisters, BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, +}; +use acvm::{ + acir::brillig::{MemoryAddress, Opcode as BrilligOpcode}, + FieldElement, }; -use acvm::acir::brillig::{Opcode as BrilligOpcode, RegisterIndex}; + +pub(crate) const MAX_STACK_SIZE: usize = 1024; impl BrilligContext { /// Creates an entry point artifact that will jump to the function label provided. @@ -25,93 +28,121 @@ impl BrilligContext { debug_show: DebugShow::new(false), }; - context.entry_point_instruction(arguments); + context.entry_point_instruction(&arguments, &return_parameters); context.add_external_call_instruction(target_function); - context.exit_point_instruction(return_parameters); + context.exit_point_instruction(&arguments, &return_parameters); context.artifact() } /// Adds the instructions needed to handle entry point parameters - /// The runtime will leave the parameters in the first `n` registers. - /// Arrays will be passed as pointers to the first element, with all the nested arrays flattened. - /// First, reserve the registers that contain the parameters. 
- /// This function also sets the starting value of the reserved registers - fn entry_point_instruction(&mut self, arguments: Vec) { - let preallocated_registers: Vec<_> = - arguments.iter().enumerate().map(|(i, _)| RegisterIndex::from(i)).collect(); - self.set_allocated_registers(preallocated_registers.clone()); - - // Then allocate and initialize the variables that will hold the parameters - let argument_variables: Vec<_> = arguments + /// The runtime will leave the parameters in calldata. + /// Arrays will be passed flattened. + fn entry_point_instruction( + &mut self, + arguments: &[BrilligParameter], + return_parameters: &[BrilligParameter], + ) { + let calldata_size = BrilligContext::flattened_tuple_size(arguments); + let return_data_size = BrilligContext::flattened_tuple_size(return_parameters); + + // Set initial value of stack pointer: MAX_STACK_SIZE + calldata_size + return_data_size + self.push_opcode(BrilligOpcode::Const { + destination: ReservedRegisters::stack_pointer(), + value: (MAX_STACK_SIZE + calldata_size + return_data_size).into(), + bit_size: BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, + }); + + // Copy calldata + self.copy_and_cast_calldata(arguments); + + // Allocate the variables for every argument: + let mut current_calldata_pointer = MAX_STACK_SIZE; + + let mut argument_variables: Vec<_> = arguments .iter() - .zip(preallocated_registers) - .map(|(argument, param_register)| match argument { - BrilligParameter::Simple => { - let variable_register = self.allocate_register(); - self.mov_instruction(variable_register, param_register); - BrilligVariable::Simple(variable_register) + .map(|argument| match argument { + BrilligParameter::SingleAddr(bit_size) => { + let single_address = self.allocate_register(); + let var = BrilligVariable::SingleAddr(SingleAddrVariable { + address: single_address, + bit_size: *bit_size, + }); + self.mov_instruction(single_address, MemoryAddress(current_calldata_pointer)); + current_calldata_pointer += 1; + var } - BrilligParameter::Array(item_types, item_count) => { - let pointer_register = self.allocate_register(); - let rc_register = self.allocate_register(); - self.mov_instruction(pointer_register, param_register); - self.const_instruction(rc_register, 1_usize.into()); - BrilligVariable::BrilligArray(BrilligArray { - pointer: pointer_register, - size: item_types.len() * item_count, + BrilligParameter::Array(_, _) => { + let pointer_to_the_array_in_calldata = + self.make_usize_constant(current_calldata_pointer.into()); + let rc_register = self.make_usize_constant(1_usize.into()); + let flattened_size = BrilligContext::flattened_size(argument); + let var = BrilligVariable::BrilligArray(BrilligArray { + pointer: pointer_to_the_array_in_calldata, + size: flattened_size, rc: rc_register, - }) + }); + + current_calldata_pointer += flattened_size; + var } BrilligParameter::Slice(_) => unimplemented!("Unsupported slices as parameter"), }) .collect(); - // Calculate the initial value for the stack pointer register - let size_arguments_memory: usize = arguments - .iter() - .map(|arg| match arg { - BrilligParameter::Simple => 0, - _ => BrilligContext::flattened_size(arg), - }) - .sum(); + // Deflatten arrays + for (argument_variable, argument) in argument_variables.iter_mut().zip(arguments) { + if let ( + BrilligVariable::BrilligArray(array), + BrilligParameter::Array(item_type, item_count), + ) = (argument_variable, argument) + { + if BrilligContext::has_nested_arrays(item_type) { + let deflattened_address = + self.deflatten_array(item_type, array.size, 
array.pointer); + self.mov_instruction(array.pointer, deflattened_address); + array.size = item_type.len() * item_count; + self.deallocate_register(deflattened_address); + } + } + } + } - // Set the initial value of the stack pointer register - self.push_opcode(BrilligOpcode::Const { - destination: ReservedRegisters::stack_pointer(), - value: size_arguments_memory.into(), - }); - // Set the initial value of the previous stack pointer register - self.push_opcode(BrilligOpcode::Const { - destination: ReservedRegisters::previous_stack_pointer(), - value: 0_usize.into(), + fn copy_and_cast_calldata(&mut self, arguments: &[BrilligParameter]) { + let calldata_size = BrilligContext::flattened_tuple_size(arguments); + self.push_opcode(BrilligOpcode::CalldataCopy { + destination_address: MemoryAddress(MAX_STACK_SIZE), + size: calldata_size, + offset: 0, }); - // Deflatten the arrays - for (parameter, assigned_variable) in arguments.iter().zip(&argument_variables) { - if let BrilligParameter::Array(item_type, item_count) = parameter { - if item_type.iter().any(|param| !matches!(param, BrilligParameter::Simple)) { - let pointer_register = assigned_variable.extract_array().pointer; - let deflattened_register = - self.deflatten_array(item_type, *item_count, pointer_register); - self.mov_instruction(pointer_register, deflattened_register); - } + fn flat_bit_sizes(param: &BrilligParameter) -> Box + '_> { + match param { + BrilligParameter::SingleAddr(bit_size) => Box::new(std::iter::once(*bit_size)), + BrilligParameter::Array(item_types, item_count) => Box::new( + (0..*item_count).flat_map(move |_| item_types.iter().flat_map(flat_bit_sizes)), + ), + BrilligParameter::Slice(..) => unimplemented!("Unsupported slices as parameter"), } } - // Move the parameters to the first user defined registers, to follow function call convention. - for (i, register) in - argument_variables.into_iter().flat_map(|arg| arg.extract_registers()).enumerate() - { - self.mov_instruction(ReservedRegisters::user_register_index(i), register); + for (i, bit_size) in arguments.iter().flat_map(flat_bit_sizes).enumerate() { + // Calldatacopy tags everything with field type, so when downcast when necessary + if bit_size < FieldElement::max_num_bits() { + self.push_opcode(BrilligOpcode::Cast { + destination: MemoryAddress(MAX_STACK_SIZE + i), + source: MemoryAddress(MAX_STACK_SIZE + i), + bit_size, + }); + } } } /// Computes the size of a parameter if it was flattened fn flattened_size(param: &BrilligParameter) -> usize { match param { - BrilligParameter::Simple => 1, + BrilligParameter::SingleAddr(_) => 1, BrilligParameter::Array(item_types, item_count) => { let item_size: usize = item_types.iter().map(BrilligContext::flattened_size).sum(); item_count * item_size @@ -122,101 +153,141 @@ impl BrilligContext { } } + /// Computes the size of a parameter if it was flattened + fn flattened_tuple_size(tuple: &[BrilligParameter]) -> usize { + tuple.iter().map(BrilligContext::flattened_size).sum() + } + + /// Computes the size of a parameter if it was flattened + fn has_nested_arrays(tuple: &[BrilligParameter]) -> bool { + tuple.iter().any(|param| !matches!(param, BrilligParameter::SingleAddr(_))) + } + /// Deflatten an array by recursively allocating nested arrays and copying the plain values. /// Returns the pointer to the deflattened items. 
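// Illustrative sketch (not part of the diff): models the flattened-size computation and
// the memory layout the new entry point sets up, using a stand-in `Param` enum rather
// than the real `BrilligParameter`. The layout follows the code above: a scratch/stack
// region of MAX_STACK_SIZE words, then calldata copied to MemoryAddress(MAX_STACK_SIZE),
// then the return data region, with the stack pointer starting just past both.
const MAX_STACK_SIZE: usize = 1024;

enum Param {
    SingleAddr,               // one memory word
    Array(Vec<Param>, usize), // item types and item count
}

fn flattened_size(param: &Param) -> usize {
    match param {
        Param::SingleAddr => 1,
        Param::Array(item_types, item_count) => {
            *item_count * item_types.iter().map(flattened_size).sum::<usize>()
        }
    }
}

fn main() {
    // The nested-array argument used by the tests below: [([u8; 2], u8); 2]
    let arguments = vec![Param::Array(
        vec![Param::Array(vec![Param::SingleAddr], 2), Param::SingleAddr],
        2,
    )];
    let returns = vec![Param::SingleAddr];

    let calldata_size: usize = arguments.iter().map(flattened_size).sum();
    let return_data_size: usize = returns.iter().map(flattened_size).sum();
    assert_eq!((calldata_size, return_data_size), (6, 1));

    let calldata_start = MAX_STACK_SIZE;
    let return_data_offset = MAX_STACK_SIZE + calldata_size;
    let initial_stack_pointer = MAX_STACK_SIZE + calldata_size + return_data_size;
    assert_eq!(
        (calldata_start, return_data_offset, initial_stack_pointer),
        (1024, 1030, 1031)
    );
}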
fn deflatten_array( &mut self, item_type: &[BrilligParameter], item_count: usize, - flattened_array_pointer: RegisterIndex, - ) -> RegisterIndex { - let movement_register = self.allocate_register(); - let deflattened_array_pointer = self.allocate_register(); - - let target_item_size = item_type.len(); - let source_item_size: usize = item_type.iter().map(BrilligContext::flattened_size).sum(); - - self.allocate_fixed_length_array(deflattened_array_pointer, item_count * target_item_size); - - for item_index in 0..item_count { - let source_item_base_index = item_index * source_item_size; - let target_item_base_index = item_index * target_item_size; - - let mut source_offset = 0; - - for (subitem_index, subitem) in item_type.iter().enumerate() { - let source_index = - self.make_constant((source_item_base_index + source_offset).into()); - - let target_index = - self.make_constant((target_item_base_index + subitem_index).into()); - - match subitem { - BrilligParameter::Simple => { - self.array_get(flattened_array_pointer, source_index, movement_register); - self.array_set(deflattened_array_pointer, target_index, movement_register); - source_offset += 1; - } - BrilligParameter::Array(nested_array_item_type, nested_array_item_count) => { - let nested_array_pointer = self.allocate_register(); - self.mov_instruction(nested_array_pointer, flattened_array_pointer); - self.memory_op( - nested_array_pointer, - source_index, - nested_array_pointer, - acvm::brillig_vm::brillig::BinaryIntOp::Add, - ); - let deflattened_nested_array_pointer = self.deflatten_array( + flattened_array_pointer: MemoryAddress, + ) -> MemoryAddress { + if BrilligContext::has_nested_arrays(item_type) { + let movement_register = self.allocate_register(); + let deflattened_array_pointer = self.allocate_register(); + + let target_item_size = item_type.len(); + let source_item_size = BrilligContext::flattened_tuple_size(item_type); + + self.allocate_fixed_length_array( + deflattened_array_pointer, + item_count * target_item_size, + ); + + for item_index in 0..item_count { + let source_item_base_index = item_index * source_item_size; + let target_item_base_index = item_index * target_item_size; + + let mut source_offset = 0; + + for (subitem_index, subitem) in item_type.iter().enumerate() { + let source_index = + self.make_usize_constant((source_item_base_index + source_offset).into()); + + let target_index = + self.make_usize_constant((target_item_base_index + subitem_index).into()); + + match subitem { + BrilligParameter::SingleAddr(_) => { + self.array_get( + flattened_array_pointer, + source_index, + movement_register, + ); + self.array_set( + deflattened_array_pointer, + target_index, + movement_register, + ); + source_offset += 1; + } + BrilligParameter::Array( nested_array_item_type, - *nested_array_item_count, - nested_array_pointer, - ); - let reference = self.allocate_register(); - let rc = self.allocate_register(); - self.const_instruction(rc, 1_usize.into()); - - self.allocate_array_reference_instruction(reference); - self.store_variable_instruction( - reference, - BrilligVariable::BrilligArray(BrilligArray { + nested_array_item_count, + ) => { + let nested_array_pointer = self.allocate_register(); + self.mov_instruction(nested_array_pointer, flattened_array_pointer); + self.memory_op( + nested_array_pointer, + source_index, + nested_array_pointer, + acvm::brillig_vm::brillig::BinaryIntOp::Add, + ); + let deflattened_nested_array_pointer = self.deflatten_array( + nested_array_item_type, + *nested_array_item_count, + 
nested_array_pointer, + ); + let reference = self.allocate_register(); + let rc = self.allocate_register(); + self.usize_const(rc, 1_usize.into()); + + self.allocate_array_reference_instruction(reference); + let array_variable = BrilligVariable::BrilligArray(BrilligArray { pointer: deflattened_nested_array_pointer, size: nested_array_item_type.len() * nested_array_item_count, rc, - }), - ); + }); + self.store_variable_instruction(reference, array_variable); - self.array_set(deflattened_array_pointer, target_index, reference); + self.array_set(deflattened_array_pointer, target_index, reference); - self.deallocate_register(nested_array_pointer); - self.deallocate_register(reference); - self.deallocate_register(rc); + self.deallocate_register(nested_array_pointer); + self.deallocate_register(reference); + array_variable + .extract_registers() + .into_iter() + .for_each(|register| self.deallocate_register(register)); - source_offset += BrilligContext::flattened_size(subitem); + source_offset += BrilligContext::flattened_size(subitem); + } + BrilligParameter::Slice(..) => unreachable!("ICE: Cannot deflatten slices"), } - BrilligParameter::Slice(..) => unreachable!("ICE: Cannot deflatten slices"), - } - self.deallocate_register(source_index); - self.deallocate_register(target_index); + self.deallocate_register(source_index); + self.deallocate_register(target_index); + } } - } - self.deallocate_register(movement_register); + self.deallocate_register(movement_register); - deflattened_array_pointer + deflattened_array_pointer + } else { + let deflattened_array_pointer = self.allocate_register(); + self.mov_instruction(deflattened_array_pointer, flattened_array_pointer); + deflattened_array_pointer + } } /// Adds the instructions needed to handle return parameters - /// The runtime expects the results in the first `n` registers. - /// Arrays are expected to be returned as pointers to the first element with all the nested arrays flattened. + /// The runtime expects the results in a contiguous memory region. + /// Arrays are expected to be returned with all the nested arrays flattened. /// However, the function called returns variables (that have extra data) and the returned arrays are deflattened. - fn exit_point_instruction(&mut self, return_parameters: Vec) { + fn exit_point_instruction( + &mut self, + arguments: &[BrilligParameter], + return_parameters: &[BrilligParameter], + ) { // First, we allocate the registers that hold the returned variables from the function call. self.set_allocated_registers(vec![]); let returned_variables: Vec<_> = return_parameters .iter() .map(|return_parameter| match return_parameter { - BrilligParameter::Simple => BrilligVariable::Simple(self.allocate_register()), + BrilligParameter::SingleAddr(bit_size) => { + BrilligVariable::SingleAddr(SingleAddrVariable { + address: self.allocate_register(), + bit_size: *bit_size, + }) + } BrilligParameter::Array(item_types, item_count) => { BrilligVariable::BrilligArray(BrilligArray { pointer: self.allocate_register(), @@ -227,43 +298,45 @@ impl BrilligContext { BrilligParameter::Slice(..) 
=> unreachable!("ICE: Cannot return slices"), }) .collect(); - // Now, we deflatten the returned arrays - for (return_param, returned_variable) in return_parameters.iter().zip(&returned_variables) { - if let BrilligParameter::Array(item_type, item_count) = return_param { - if item_type.iter().any(|item| !matches!(item, BrilligParameter::Simple)) { - let returned_pointer = returned_variable.extract_array().pointer; - let flattened_array_pointer = self.allocate_register(); - self.allocate_fixed_length_array( - flattened_array_pointer, - BrilligContext::flattened_size(return_param), + // Now, we deflatten the return data + let calldata_size = BrilligContext::flattened_tuple_size(arguments); + let return_data_size = BrilligContext::flattened_tuple_size(return_parameters); + + // Return data has a reserved space after calldata + let return_data_offset = MAX_STACK_SIZE + calldata_size; + let mut return_data_index = return_data_offset; + + for (return_param, returned_variable) in return_parameters.iter().zip(&returned_variables) { + match return_param { + BrilligParameter::SingleAddr(_) => { + self.mov_instruction( + MemoryAddress(return_data_index), + returned_variable.extract_single_addr().address, ); + return_data_index += 1; + } + BrilligParameter::Array(item_type, item_count) => { + let returned_pointer = returned_variable.extract_array().pointer; + let pointer_to_return_data = self.make_usize_constant(return_data_index.into()); self.flatten_array( item_type, *item_count, - flattened_array_pointer, + pointer_to_return_data, returned_pointer, ); - self.mov_instruction(returned_pointer, flattened_array_pointer); + self.deallocate_register(pointer_to_return_data); + return_data_index += BrilligContext::flattened_size(return_param); + } + BrilligParameter::Slice(..) => { + unreachable!("ICE: Cannot return slices from brillig entrypoints") } } } - // The VM expects us to follow the calling convention of returning - // their results in the first `n` registers. So we to move the return values - // to the first `n` registers once completed. - - // Move the results to registers 0..n - for (i, returned_variable) in returned_variables.into_iter().enumerate() { - let register = match returned_variable { - BrilligVariable::Simple(register) => register, - BrilligVariable::BrilligArray(array) => array.pointer, - BrilligVariable::BrilligVector(vector) => vector.pointer, - }; - self.push_opcode(BrilligOpcode::Mov { destination: i.into(), source: register }); - } - self.push_opcode(BrilligOpcode::Stop); + + self.push_opcode(BrilligOpcode::Stop { return_data_offset, return_data_size }); } // Flattens an array by recursively copying nested arrays and regular items. 
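// Illustrative sketch (not part of the diff): a plain-Rust model of the flattening that
// `flatten_array` below performs on Brillig memory. Nested arrays are inlined in order,
// which is why the nested value [([1, 2], 3), ([4, 5], 6)] used by the tests further down
// corresponds to the flat calldata/return-data layout [1, 2, 3, 4, 5, 6]. The `Nested`
// enum here is a stand-in, not the VM's value type.
enum Nested {
    Single(u64),
    Array(Vec<Nested>),
}

fn flatten(value: &Nested, out: &mut Vec<u64>) {
    match value {
        Nested::Single(x) => out.push(*x),
        Nested::Array(items) => {
            for item in items {
                flatten(item, out);
            }
        }
    }
}

fn main() {
    let nested = Nested::Array(vec![
        Nested::Array(vec![
            Nested::Array(vec![Nested::Single(1), Nested::Single(2)]),
            Nested::Single(3),
        ]),
        Nested::Array(vec![
            Nested::Array(vec![Nested::Single(4), Nested::Single(5)]),
            Nested::Single(6),
        ]),
    ]);
    let mut flat = Vec::new();
    flatten(&nested, &mut flat);
    assert_eq!(flat, vec![1, 2, 3, 4, 5, 6]);
}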
@@ -271,96 +344,119 @@ impl BrilligContext { &mut self, item_type: &[BrilligParameter], item_count: usize, - flattened_array_pointer: RegisterIndex, - deflattened_array_pointer: RegisterIndex, + flattened_array_pointer: MemoryAddress, + deflattened_array_pointer: MemoryAddress, ) { - let movement_register = self.allocate_register(); - - let source_item_size = item_type.len(); - let target_item_size: usize = item_type.iter().map(BrilligContext::flattened_size).sum(); - - for item_index in 0..item_count { - let source_item_base_index = item_index * source_item_size; - let target_item_base_index = item_index * target_item_size; - - let mut target_offset = 0; - - for (subitem_index, subitem) in item_type.iter().enumerate() { - let source_index = - self.make_constant((source_item_base_index + subitem_index).into()); - let target_index = - self.make_constant((target_item_base_index + target_offset).into()); - - match subitem { - BrilligParameter::Simple => { - self.array_get(deflattened_array_pointer, source_index, movement_register); - self.array_set(flattened_array_pointer, target_index, movement_register); - target_offset += 1; - } - BrilligParameter::Array(nested_array_item_type, nested_array_item_count) => { - let nested_array_reference = self.allocate_register(); - self.array_get( - deflattened_array_pointer, - source_index, - nested_array_reference, - ); - - let nested_array_variable = BrilligVariable::BrilligArray(BrilligArray { - pointer: self.allocate_register(), - size: nested_array_item_type.len() * nested_array_item_count, - rc: self.allocate_register(), - }); - - self.load_variable_instruction( - nested_array_variable, - nested_array_reference, - ); - - let flattened_nested_array_pointer = self.allocate_register(); - - self.mov_instruction( - flattened_nested_array_pointer, - flattened_array_pointer, - ); - - self.memory_op( - flattened_nested_array_pointer, - target_index, - flattened_nested_array_pointer, - acvm::brillig_vm::brillig::BinaryIntOp::Add, - ); - - self.flatten_array( + if BrilligContext::has_nested_arrays(item_type) { + let movement_register = self.allocate_register(); + + let source_item_size = item_type.len(); + let target_item_size: usize = + item_type.iter().map(BrilligContext::flattened_size).sum(); + + for item_index in 0..item_count { + let source_item_base_index = item_index * source_item_size; + let target_item_base_index = item_index * target_item_size; + + let mut target_offset = 0; + + for (subitem_index, subitem) in item_type.iter().enumerate() { + let source_index = + self.make_usize_constant((source_item_base_index + subitem_index).into()); + let target_index = + self.make_usize_constant((target_item_base_index + target_offset).into()); + + match subitem { + BrilligParameter::SingleAddr(_) => { + self.array_get( + deflattened_array_pointer, + source_index, + movement_register, + ); + self.array_set( + flattened_array_pointer, + target_index, + movement_register, + ); + target_offset += 1; + } + BrilligParameter::Array( nested_array_item_type, - *nested_array_item_count, - flattened_nested_array_pointer, - nested_array_variable.extract_array().pointer, - ); - - self.deallocate_register(nested_array_reference); - self.deallocate_register(flattened_nested_array_pointer); - nested_array_variable - .extract_registers() - .into_iter() - .for_each(|register| self.deallocate_register(register)); - - target_offset += BrilligContext::flattened_size(subitem); + nested_array_item_count, + ) => { + let nested_array_reference = self.allocate_register(); + 
self.array_get( + deflattened_array_pointer, + source_index, + nested_array_reference, + ); + + let nested_array_variable = + BrilligVariable::BrilligArray(BrilligArray { + pointer: self.allocate_register(), + size: nested_array_item_type.len() * nested_array_item_count, + rc: self.allocate_register(), + }); + + self.load_variable_instruction( + nested_array_variable, + nested_array_reference, + ); + + let flattened_nested_array_pointer = self.allocate_register(); + + self.mov_instruction( + flattened_nested_array_pointer, + flattened_array_pointer, + ); + + self.memory_op( + flattened_nested_array_pointer, + target_index, + flattened_nested_array_pointer, + acvm::brillig_vm::brillig::BinaryIntOp::Add, + ); + + self.flatten_array( + nested_array_item_type, + *nested_array_item_count, + flattened_nested_array_pointer, + nested_array_variable.extract_array().pointer, + ); + + self.deallocate_register(nested_array_reference); + self.deallocate_register(flattened_nested_array_pointer); + nested_array_variable + .extract_registers() + .into_iter() + .for_each(|register| self.deallocate_register(register)); + + target_offset += BrilligContext::flattened_size(subitem); + } + BrilligParameter::Slice(..) => unreachable!("ICE: Cannot flatten slices"), } - BrilligParameter::Slice(..) => unreachable!("ICE: Cannot flatten slices"), - } - self.deallocate_register(source_index); - self.deallocate_register(target_index); + self.deallocate_register(source_index); + self.deallocate_register(target_index); + } } - } - self.deallocate_register(movement_register); + self.deallocate_register(movement_register); + } else { + let item_count = self.make_usize_constant((item_count * item_type.len()).into()); + self.copy_array_instruction( + deflattened_array_pointer, + flattened_array_pointer, + item_count, + ); + self.deallocate_register(item_count); + } } } #[cfg(test)] mod tests { - use acvm::brillig_vm::brillig::{RegisterIndex, Value}; + use acvm::brillig_vm::brillig::Value; use crate::brillig::brillig_ir::{ artifact::BrilligParameter, @@ -370,7 +466,7 @@ mod tests { #[test] fn entry_point_with_nested_array_parameter() { - let flattened_array = vec![ + let calldata = vec![ Value::from(1_usize), Value::from(2_usize), Value::from(3_usize), @@ -380,55 +476,30 @@ mod tests { ]; let arguments = vec![BrilligParameter::Array( vec![ - BrilligParameter::Array(vec![BrilligParameter::Simple], 2), - BrilligParameter::Simple, + BrilligParameter::Array(vec![BrilligParameter::SingleAddr(8)], 2), + BrilligParameter::SingleAddr(8), ], 2, )]; - let returns = vec![BrilligParameter::Simple]; + let returns = vec![BrilligParameter::SingleAddr(8)]; let mut context = create_context(); // Allocate the parameter let array_pointer = context.allocate_register(); + let array_value = context.allocate_register(); - context.return_instruction(&[array_pointer]); + context.load_instruction(array_pointer, array_pointer); + context.load_instruction(array_pointer, array_pointer); + context.load_instruction(array_value, array_pointer); - let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; - let vm = create_and_run_vm(flattened_array.clone(), vec![Value::from(0_usize)], &bytecode); - let memory = vm.get_memory(); + context.return_instruction(&[array_value]); - assert_eq!(vm.get_registers().get(RegisterIndex(0)), Value::from(flattened_array.len())); - assert_eq!( - memory, - &vec![ - // The original flattened values - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), - Value::from(4_usize), - 
Value::from(5_usize), - Value::from(6_usize), - // The pointer to the nested reference of the first item - Value::from(12_usize), - Value::from(3_usize), - // The pointer to the nested reference of the second item - Value::from(16_usize), - Value::from(6_usize), - // The nested array of the first item - Value::from(1_usize), - Value::from(2_usize), - // The nested reference of the first item - Value::from(10_usize), - Value::from(1_usize), - // The nested array of the second item - Value::from(4_usize), - Value::from(5_usize), - // The nested reference of the second item - Value::from(14_usize), - Value::from(1_usize), - ] - ); + let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; + let (vm, return_data_offset, return_data_size) = + create_and_run_vm(calldata.clone(), &bytecode); + assert_eq!(return_data_size, 1, "Return data size is incorrect"); + assert_eq!(vm.get_memory()[return_data_offset], Value::from(1_usize)); } #[test] @@ -443,8 +514,8 @@ mod tests { ]; let array_param = BrilligParameter::Array( vec![ - BrilligParameter::Array(vec![BrilligParameter::Simple], 2), - BrilligParameter::Simple, + BrilligParameter::Array(vec![BrilligParameter::SingleAddr(8)], 2), + BrilligParameter::SingleAddr(8), ], 2, ); @@ -463,46 +534,14 @@ mod tests { context.return_instruction(&brillig_array.extract_registers()); let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; - let vm = create_and_run_vm(flattened_array.clone(), vec![Value::from(0_usize)], &bytecode); + let (vm, return_data_pointer, return_data_size) = + create_and_run_vm(flattened_array.clone(), &bytecode); let memory = vm.get_memory(); assert_eq!( - memory, - &vec![ - // The original flattened values - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), - Value::from(4_usize), - Value::from(5_usize), - Value::from(6_usize), - // The pointer to the nested reference of the first item - Value::from(12_usize), - Value::from(3_usize), - // The pointer to the nested reference of the second item - Value::from(16_usize), - Value::from(6_usize), - // The nested array of the first item - Value::from(1_usize), - Value::from(2_usize), - // The nested reference of the first item - Value::from(10_usize), - Value::from(1_usize), - // The nested array of the second item - Value::from(4_usize), - Value::from(5_usize), - // The nested reference of the second item - Value::from(14_usize), - Value::from(1_usize), - // The original flattened again - Value::from(1_usize), - Value::from(2_usize), - Value::from(3_usize), - Value::from(4_usize), - Value::from(5_usize), - Value::from(6_usize), - ] + memory[return_data_pointer..(return_data_pointer + flattened_array.len())], + flattened_array ); - assert_eq!(vm.get_registers().get(RegisterIndex(0)), 18_usize.into()); + assert_eq!(return_data_size, flattened_array.len()); } } diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/registers.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/registers.rs index e7ab1492acb..8c0e36215a9 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/registers.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/registers.rs @@ -1,4 +1,6 @@ -use acvm::acir::brillig::RegisterIndex; +use acvm::acir::brillig::MemoryAddress; + +use crate::brillig::brillig_ir::entry_point::MAX_STACK_SIZE; use super::ReservedRegisters; @@ -8,7 +10,7 @@ use super::ReservedRegisters; /// Each has a stack base pointer from which all stack allocations can be offset. 
pub(crate) struct BrilligRegistersContext { /// A free-list of registers that have been deallocated and can be used again. - deallocated_registers: Vec<RegisterIndex>, + deallocated_registers: Vec<MemoryAddress>, /// A usize indicating the next un-used register. next_free_register_index: usize, } @@ -23,7 +25,7 @@ impl BrilligRegistersContext { } /// Creates a new register context from a set of registers allocated previously. - pub(crate) fn from_preallocated_registers(preallocated_registers: Vec<RegisterIndex>) -> Self { + pub(crate) fn from_preallocated_registers(preallocated_registers: Vec<MemoryAddress>) -> Self { let next_free_register_index = preallocated_registers.iter().fold( ReservedRegisters::len(), |free_register_index, preallocated_register| { @@ -36,8 +38,8 @@ impl BrilligRegistersContext { ); let mut deallocated_registers = Vec::new(); for i in ReservedRegisters::len()..next_free_register_index { - if !preallocated_registers.contains(&RegisterIndex::from(i)) { - deallocated_registers.push(RegisterIndex::from(i)); + if !preallocated_registers.contains(&MemoryAddress::from(i)) { + deallocated_registers.push(MemoryAddress::from(i)); } } @@ -45,7 +47,7 @@ impl BrilligRegistersContext { } /// Ensures a register is allocated. - pub(crate) fn ensure_register_is_allocated(&mut self, register: RegisterIndex) { + pub(crate) fn ensure_register_is_allocated(&mut self, register: MemoryAddress) { let index = register.to_usize(); if index < self.next_free_register_index { // If it could be allocated, check if it's in the deallocated list and remove it from there @@ -53,26 +55,28 @@ } else { // If it couldn't yet be, expand the register space. self.next_free_register_index = index + 1; + assert!(self.next_free_register_index < MAX_STACK_SIZE, "Stack too deep"); } } /// Creates a new register. - pub(crate) fn allocate_register(&mut self) -> RegisterIndex { + pub(crate) fn allocate_register(&mut self) -> MemoryAddress { // If we have a register in our free list of deallocated registers, // consume it first. This prioritizes reuse. if let Some(register) = self.deallocated_registers.pop() { return register; } // Otherwise, move to our latest register. - let register = RegisterIndex::from(self.next_free_register_index); + let register = MemoryAddress::from(self.next_free_register_index); self.next_free_register_index += 1; + assert!(self.next_free_register_index < MAX_STACK_SIZE, "Stack too deep"); register } /// Push a register to the deallocation list, ready for reuse. /// TODO(AD): currently, register deallocation is only done with immediate values.
/// TODO(AD): See https://github.com/noir-lang/noir/issues/1720 - pub(crate) fn deallocate_register(&mut self, register_index: RegisterIndex) { + pub(crate) fn deallocate_register(&mut self, register_index: MemoryAddress) { assert!(!self.deallocated_registers.contains(&register_index)); self.deallocated_registers.push(register_index); } diff --git a/compiler/noirc_evaluator/src/errors.rs b/compiler/noirc_evaluator/src/errors.rs index 73b6e671bd5..40f4336e0b5 100644 --- a/compiler/noirc_evaluator/src/errors.rs +++ b/compiler/noirc_evaluator/src/errors.rs @@ -46,6 +46,8 @@ pub enum RuntimeError { NestedSlice { call_stack: CallStack }, #[error("Big Integer modulus do no match")] BigIntModulus { call_stack: CallStack }, + #[error("Slices cannot be returned from an unconstrained runtime to a constrained runtime")] + UnconstrainedSliceReturnToConstrained { call_stack: CallStack }, } // We avoid showing the actual lhs and rhs since most of the time they are just 0 @@ -135,7 +137,8 @@ impl RuntimeError { | RuntimeError::IntegerOutOfBounds { call_stack, .. } | RuntimeError::UnsupportedIntegerSize { call_stack, .. } | RuntimeError::NestedSlice { call_stack, .. } - | RuntimeError::BigIntModulus { call_stack, .. } => call_stack, + | RuntimeError::BigIntModulus { call_stack, .. } + | RuntimeError::UnconstrainedSliceReturnToConstrained { call_stack } => call_stack, } } } @@ -155,15 +158,26 @@ impl RuntimeError { RuntimeError::InternalError(cause) => { Diagnostic::simple_error( "Internal Consistency Evaluators Errors: \n - This is likely a bug. Consider Opening an issue at https://github.com/noir-lang/noir/issues".to_owned(), + This is likely a bug. Consider opening an issue at https://github.com/noir-lang/noir/issues".to_owned(), cause.to_string(), noirc_errors::Span::inclusive(0, 0) ) } + RuntimeError::UnknownLoopBound { .. } => { + let primary_message = self.to_string(); + let location = + self.call_stack().back().expect("Expected RuntimeError to have a location"); + + Diagnostic::simple_error( + primary_message, + "If attempting to fetch the length of a slice, try converting to an array. Slices only use dynamic lengths.".to_string(), + location.span, + ) + } _ => { let message = self.to_string(); let location = - self.call_stack().back().expect("Expected RuntimeError to have a location"); + self.call_stack().back().unwrap_or_else(|| panic!("Expected RuntimeError to have a location. Error message: {message}")); Diagnostic::simple_error(message, String::new(), location.span) } diff --git a/compiler/noirc_evaluator/src/ssa.rs b/compiler/noirc_evaluator/src/ssa.rs index 0e3076923e0..0bb81efe977 100644 --- a/compiler/noirc_evaluator/src/ssa.rs +++ b/compiler/noirc_evaluator/src/ssa.rs @@ -14,7 +14,7 @@ use crate::{ errors::{RuntimeError, SsaReport}, }; use acvm::acir::{ - circuit::{Circuit, PublicInputs}, + circuit::{Circuit, ExpressionWidth, PublicInputs}, native_types::Witness, }; @@ -40,12 +40,13 @@ pub(crate) fn optimize_into_acir( program: Program, print_ssa_passes: bool, print_brillig_trace: bool, + force_brillig_output: bool, ) -> Result<GeneratedAcir, RuntimeError> { let abi_distinctness = program.return_distinctness; let ssa_gen_span = span!(Level::TRACE, "ssa_generation"); let ssa_gen_span_guard = ssa_gen_span.enter(); - let ssa = SsaBuilder::new(program, print_ssa_passes)? + let ssa = SsaBuilder::new(program, print_ssa_passes, force_brillig_output)?
.run_pass(Ssa::defunctionalize, "After Defunctionalization:") .run_pass(Ssa::inline_functions, "After Inlining:") // Run mem2reg with the CFG separated into blocks @@ -53,15 +54,15 @@ pub(crate) fn optimize_into_acir( .try_run_pass(Ssa::evaluate_assert_constant, "After Assert Constant:")? .try_run_pass(Ssa::unroll_loops, "After Unrolling:")? .run_pass(Ssa::simplify_cfg, "After Simplifying:") - // Run mem2reg before flattening to handle any promotion - // of values that can be accessed after loop unrolling. - // If there are slice mergers uncovered by loop unrolling - // and this pass is missed, slice merging will fail inside of flattening. - .run_pass(Ssa::mem2reg, "After Mem2Reg:") .run_pass(Ssa::flatten_cfg, "After Flattening:") + .run_pass(Ssa::remove_bit_shifts, "After Removing Bit Shifts:") // Run mem2reg once more with the flattened CFG to catch any remaining loads/stores .run_pass(Ssa::mem2reg, "After Mem2Reg:") .run_pass(Ssa::fold_constants, "After Constant Folding:") + .run_pass( + Ssa::fold_constants_using_constraints, + "After Constant Folding With Constraint Info:", + ) .run_pass(Ssa::dead_instruction_elimination, "After Dead Instruction Elimination:") .finish(); @@ -83,10 +84,18 @@ pub fn create_circuit( program: Program, enable_ssa_logging: bool, enable_brillig_logging: bool, + force_brillig_output: bool, ) -> Result<(Circuit, DebugInfo, Vec, Vec, Vec), RuntimeError> { + let debug_variables = program.debug_variables.clone(); + let debug_types = program.debug_types.clone(); let func_sig = program.main_function_signature.clone(); - let mut generated_acir = - optimize_into_acir(program, enable_ssa_logging, enable_brillig_logging)?; + let recursive = program.recursive; + let mut generated_acir = optimize_into_acir( + program, + enable_ssa_logging, + enable_brillig_logging, + force_brillig_output, + )?; let opcodes = generated_acir.take_opcodes(); let current_witness_index = generated_acir.current_witness_index().0; let GeneratedAcir { @@ -106,11 +115,13 @@ pub fn create_circuit( let circuit = Circuit { current_witness_index, + expression_width: ExpressionWidth::Unbounded, opcodes, private_parameters, public_parameters, return_values, assert_messages: assert_messages.into_iter().collect(), + recursive, }; // This converts each im::Vector in the BTreeMap to a Vec @@ -119,7 +130,7 @@ pub fn create_circuit( .map(|(index, locations)| (index, locations.into_iter().collect())) .collect(); - let mut debug_info = DebugInfo::new(locations); + let mut debug_info = DebugInfo::new(locations, debug_variables, debug_types); // Perform any ACIR-level optimizations let (optimized_circuit, transformation_map) = acvm::compiler::optimize(circuit); @@ -169,8 +180,12 @@ struct SsaBuilder { } impl SsaBuilder { - fn new(program: Program, print_ssa_passes: bool) -> Result { - let ssa = ssa_gen::generate_ssa(program)?; + fn new( + program: Program, + print_ssa_passes: bool, + force_brillig_runtime: bool, + ) -> Result { + let ssa = ssa_gen::generate_ssa(program, force_brillig_runtime)?; Ok(SsaBuilder { print_ssa_passes, ssa }.print("Initial SSA:")) } diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs index 1ddbae0f339..090d5bb0a83 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs @@ -1,4 +1,3 @@ pub(crate) mod acir_variable; pub(crate) mod big_int; pub(crate) mod generated_acir; -pub(crate) mod sort; diff --git 
a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs index a419dac9d93..fb11bae556c 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs @@ -11,7 +11,7 @@ use acvm::acir::circuit::brillig::{BrilligInputs, BrilligOutputs}; use acvm::acir::circuit::opcodes::{BlockId, MemOp}; use acvm::acir::circuit::Opcode; use acvm::blackbox_solver; -use acvm::brillig_vm::{brillig::Value, Registers, VMStatus, VM}; +use acvm::brillig_vm::{brillig::Value, VMStatus, VM}; use acvm::{ acir::{ brillig::Opcode as BrilligOpcode, @@ -67,6 +67,13 @@ impl AcirType { pub(crate) fn unsigned(bit_size: u32) -> Self { AcirType::NumericType(NumericType::Unsigned { bit_size }) } + + pub(crate) fn to_numeric_type(&self) -> NumericType { + match self { + AcirType::NumericType(numeric_type) => *numeric_type, + AcirType::Array(_, _) => unreachable!("cannot fetch a numeric type for an array type"), + } + } } impl From for AcirType { @@ -88,6 +95,12 @@ impl<'a> From<&'a SsaType> for AcirType { } } +impl From for AcirType { + fn from(value: NumericType) -> Self { + AcirType::NumericType(value) + } +} + #[derive(Debug, Default)] /// Context object which holds the relationship between /// `Variables`(AcirVar) and types such as `Expression` and `Witness` @@ -319,6 +332,7 @@ impl AcirContext { vec![AcirValue::Var(var, AcirType::field())], vec![AcirType::field()], true, + false, )?; let inverted_var = Self::expect_one_var(results); @@ -631,18 +645,22 @@ impl AcirContext { bit_size: u32, predicate: AcirVar, ) -> Result<(AcirVar, AcirVar), RuntimeError> { - // lhs = rhs * q + r - // - // If predicate is zero, `q_witness` and `r_witness` will be 0 let zero = self.add_constant(FieldElement::zero()); - if self.var_to_expression(predicate)?.is_zero() { - return Ok((zero, zero)); - } + let one = self.add_constant(FieldElement::one()); + + let lhs_expr = self.var_to_expression(lhs)?; + let rhs_expr = self.var_to_expression(rhs)?; + let predicate_expr = self.var_to_expression(predicate)?; + + match (lhs_expr.to_const(), rhs_expr.to_const(), predicate_expr.to_const()) { + // If predicate is zero, `quotient_var` and `remainder_var` will be 0. + (_, _, Some(predicate_const)) if predicate_const.is_zero() => { + return Ok((zero, zero)); + } - match (self.var_to_expression(lhs)?.to_const(), self.var_to_expression(rhs)?.to_const()) { // If `lhs` and `rhs` are known constants then we can calculate the result at compile time. // `rhs` must be non-zero. - (Some(lhs_const), Some(rhs_const)) if rhs_const != FieldElement::zero() => { + (Some(lhs_const), Some(rhs_const), _) if rhs_const != FieldElement::zero() => { let quotient = lhs_const.to_u128() / rhs_const.to_u128(); let remainder = lhs_const.to_u128() - quotient * rhs_const.to_u128(); @@ -652,36 +670,29 @@ impl AcirContext { } // If `rhs` is one then the division is a noop. - (_, Some(rhs_const)) if rhs_const == FieldElement::one() => { + (_, Some(rhs_const), _) if rhs_const == FieldElement::one() => { return Ok((lhs, zero)); } - _ => (), - } - - // Check that we the rhs is not zero. - // Otherwise, when executing the brillig quotient we may attempt to divide by zero, causing a VM panic. - // - // When the predicate is 0, the equation always passes. - // When the predicate is 1, the rhs must not be 0. - let one = self.add_constant(FieldElement::one()); + // After this point, we cannot perform the division at compile-time. 
+ // + // We need to check that the rhs is not zero, otherwise when executing the brillig quotient, + // we may attempt to divide by zero and cause a VM panic. + // + // When the predicate is 0, the division always succeeds (as it is skipped). + // When the predicate is 1, the rhs must not be 0. - let rhs_expr = self.var_to_expression(rhs)?; - let rhs_is_nonzero_const = rhs_expr.is_const() && !rhs_expr.is_zero(); - if !rhs_is_nonzero_const { - match self.var_to_expression(predicate)?.to_const() { - Some(predicate) if predicate.is_one() => { - // If the predicate is known to be active, we simply assert that an inverse must exist. - // This implies that `rhs != 0`. - let _inverse = self.inv_var(rhs, one)?; - } + // If the predicate is known to be active, we simply assert that an inverse must exist. + // This implies that `rhs != 0`. + (_, _, Some(predicate_const)) if predicate_const.is_one() => { + let _inverse = self.inv_var(rhs, one)?; + } - _ => { - // Otherwise we must handle both potential cases. - let rhs_is_zero = self.eq_var(rhs, zero)?; - let rhs_is_not_zero = self.mul_var(rhs_is_zero, predicate)?; - self.assert_eq_var(rhs_is_not_zero, zero, None)?; - } + // Otherwise we must handle both potential cases. + _ => { + let rhs_is_zero = self.eq_var(rhs, zero)?; + let rhs_is_zero_and_predicate_active = self.mul_var(rhs_is_zero, predicate)?; + self.assert_eq_var(rhs_is_zero_and_predicate_active, zero, None)?; } } @@ -689,7 +700,7 @@ impl AcirContext { let mut max_q_bits = bit_size; let mut max_rhs_bits = bit_size; // when rhs is constant, we can better estimate the maximum bit sizes - if let Some(rhs_const) = self.var_to_expression(rhs)?.to_const() { + if let Some(rhs_const) = rhs_expr.to_const() { max_rhs_bits = rhs_const.num_bits(); if max_rhs_bits != 0 { if max_rhs_bits > bit_size { @@ -699,18 +710,6 @@ impl AcirContext { } } - // Avoids overflow: 'q*b+r < 2^max_q_bits*2^max_rhs_bits' - let mut avoid_overflow = false; - if max_q_bits + max_rhs_bits >= FieldElement::max_num_bits() - 1 { - // q*b+r can overflow; we avoid this when b is constant - if self.var_to_expression(rhs)?.is_const() { - avoid_overflow = true; - } else { - // we do not support unbounded division - unreachable!("overflow in unbounded division"); - } - } - let [q_value, r_value]: [AcirValue; 2] = self .brillig( predicate, @@ -721,6 +720,7 @@ impl AcirContext { ], vec![AcirType::unsigned(max_q_bits), AcirType::unsigned(max_rhs_bits)], true, + false, )? 
.try_into() .expect("quotient only returns two values"); @@ -761,7 +761,19 @@ impl AcirContext { let lhs_constraint = self.mul_var(lhs, predicate)?; self.assert_eq_var(lhs_constraint, rhs_constraint, None)?; - if let Some(rhs_const) = self.var_to_expression(rhs)?.to_const() { + // Avoids overflow: 'q*b+r < 2^max_q_bits*2^max_rhs_bits' + let mut avoid_overflow = false; + if max_q_bits + max_rhs_bits >= FieldElement::max_num_bits() - 1 { + // q*b+r can overflow; we avoid this when b is constant + if rhs_expr.is_const() { + avoid_overflow = true; + } else { + // we do not support unbounded division + unreachable!("overflow in unbounded division"); + } + } + + if let Some(rhs_const) = rhs_expr.to_const() { if avoid_overflow { // we compute q0 = p/rhs let rhs_big = BigUint::from_bytes_be(&rhs_const.to_be_bytes()); @@ -1199,7 +1211,7 @@ impl AcirContext { (vec![state_len], Vec::new()) } BlackBoxFunc::BigIntAdd - | BlackBoxFunc::BigIntNeg + | BlackBoxFunc::BigIntSub | BlackBoxFunc::BigIntMul | BlackBoxFunc::BigIntDiv => { assert_eq!(inputs.len(), 4, "ICE - bigint operation requires 4 inputs"); @@ -1245,7 +1257,8 @@ impl AcirContext { for i in const_inputs { field_inputs.push(i?); } - let modulus = self.big_int_ctx.modulus(field_inputs[0]); + let bigint = self.big_int_ctx.get(field_inputs[0]); + let modulus = self.big_int_ctx.modulus(bigint.modulus_id()); let bytes_len = ((modulus - BigUint::from(1_u32)).bits() - 1) / 8 + 1; output_count = bytes_len as usize; (field_inputs, vec![FieldElement::from(bytes_len as u128)]) @@ -1317,7 +1330,7 @@ impl AcirContext { let mut witnesses = Vec::new(); for input in inputs { let mut single_val_witnesses = Vec::new(); - for (input, typ) in input.flatten() { + for (input, typ) in self.flatten(input)? { // Intrinsics only accept Witnesses. This is not a limitation of the // intrinsics, its just how we have defined things. Ideally, we allow // constants too. @@ -1399,16 +1412,32 @@ impl AcirContext { self.radix_decompose(endian, input_var, two_var, limb_count_var, result_element_type) } - /// Recursive helper for flatten_values to flatten a single AcirValue into the result vector. - pub(crate) fn flatten_value(acir_vars: &mut Vec, value: AcirValue) { + /// Recursive helper to flatten a single AcirValue into the result vector. + /// This helper differs from `flatten()` on the `AcirValue` type, as this method has access to the AcirContext + /// which lets us flatten an `AcirValue::DynamicArray` by reading its variables from memory. + pub(crate) fn flatten( + &mut self, + value: AcirValue, + ) -> Result, InternalError> { match value { - AcirValue::Var(acir_var, _) => acir_vars.push(acir_var), + AcirValue::Var(acir_var, typ) => Ok(vec![(acir_var, typ)]), AcirValue::Array(array) => { + let mut values = Vec::new(); for value in array { - Self::flatten_value(acir_vars, value); + values.append(&mut self.flatten(value)?); } + Ok(values) + } + AcirValue::DynamicArray(AcirDynamicArray { block_id, len, value_types, .. 
}) => { + try_vecmap(0..len, |i| { + let index_var = self.add_constant(i); + + Ok::<(AcirVar, AcirType), InternalError>(( + self.read_from_memory(block_id, &index_var)?, + value_types[i].into(), + )) + }) } - AcirValue::DynamicArray(_) => unreachable!("Cannot flatten a dynamic array"), } } @@ -1441,20 +1470,21 @@ impl AcirContext { inputs: Vec, outputs: Vec, attempt_execution: bool, - ) -> Result, InternalError> { - let b_inputs = try_vecmap(inputs, |i| match i { - AcirValue::Var(var, _) => Ok(BrilligInputs::Single(self.var_to_expression(var)?)), - AcirValue::Array(vars) => { - let mut var_expressions: Vec = Vec::new(); - for var in vars { - self.brillig_array_input(&mut var_expressions, var)?; + unsafe_return_values: bool, + ) -> Result, RuntimeError> { + let b_inputs = try_vecmap(inputs, |i| -> Result<_, InternalError> { + match i { + AcirValue::Var(var, _) => Ok(BrilligInputs::Single(self.var_to_expression(var)?)), + AcirValue::Array(vars) => { + let mut var_expressions: Vec = Vec::new(); + for var in vars { + self.brillig_array_input(&mut var_expressions, var)?; + } + Ok(BrilligInputs::Array(var_expressions)) + } + AcirValue::DynamicArray(AcirDynamicArray { block_id, .. }) => { + Ok(BrilligInputs::MemoryArray(block_id)) } - Ok(BrilligInputs::Array(var_expressions)) - } - AcirValue::DynamicArray(_) => { - let mut var_expressions = Vec::new(); - self.brillig_array_input(&mut var_expressions, i)?; - Ok(BrilligInputs::Array(var_expressions)) } })?; @@ -1489,6 +1519,37 @@ impl AcirContext { let predicate = self.var_to_expression(predicate)?; self.acir_ir.brillig(Some(predicate), generated_brillig, b_inputs, b_outputs); + fn range_constraint_value( + context: &mut AcirContext, + value: &AcirValue, + ) -> Result<(), RuntimeError> { + match value { + AcirValue::Var(var, typ) => { + let numeric_type = match typ { + AcirType::NumericType(numeric_type) => numeric_type, + _ => unreachable!("`AcirValue::Var` may only hold primitive values"), + }; + context.range_constrain_var(*var, numeric_type, None)?; + } + AcirValue::Array(values) => { + for value in values { + range_constraint_value(context, value)?; + } + } + AcirValue::DynamicArray(_) => { + unreachable!("Brillig opcodes cannot return dynamic arrays") + } + } + Ok(()) + } + + // This is a hack to ensure that if we're compiling a brillig entrypoint function then + // we don't also add a number of range constraints. 
+ if !unsafe_return_values { + for output_var in &outputs_var { + range_constraint_value(self, output_var)?; + } + } Ok(outputs_var) } @@ -1557,23 +1618,17 @@ impl AcirContext { inputs: &[BrilligInputs], outputs_types: &[AcirType], ) -> Option> { - let (registers, memory) = execute_brillig(code, inputs)?; - - let outputs_var = vecmap(outputs_types.iter().enumerate(), |(index, output)| { - let register_value = registers.get(index.into()); - match output { - AcirType::NumericType(_) => { - let var = self.add_data(AcirVarData::Const(register_value.to_field())); - AcirValue::Var(var, output.clone()) - } - AcirType::Array(element_types, size) => { - let mem_ptr = register_value.to_usize(); - self.brillig_constant_array_output( - element_types, - *size, - &mut memory.iter().skip(mem_ptr), - ) - } + let mut memory = (execute_brillig(code, inputs)?).into_iter(); + + let outputs_var = vecmap(outputs_types.iter(), |output| match output { + AcirType::NumericType(_) => { + let var = self.add_data(AcirVarData::Const( + memory.next().expect("Missing return data").to_field(), + )); + AcirValue::Var(var, output.clone()) + } + AcirType::Array(element_types, size) => { + self.brillig_constant_array_output(element_types, *size, &mut memory) } }); @@ -1581,11 +1636,11 @@ impl AcirContext { } /// Recursively create [`AcirValue`]s for returned arrays. This is necessary because a brillig returned array can have nested arrays as elements. - fn brillig_constant_array_output<'a>( + fn brillig_constant_array_output( &mut self, element_types: &[AcirType], size: usize, - memory_iter: &mut impl Iterator, + memory_iter: &mut impl Iterator, ) -> AcirValue { let mut array_values = im::Vector::new(); for _ in 0..size { @@ -1611,50 +1666,6 @@ impl AcirContext { AcirValue::Array(array_values) } - /// Generate output variables that are constrained to be the sorted inputs - /// The outputs are the sorted inputs iff - /// outputs are sorted and - /// outputs are a permutation of the inputs - pub(crate) fn sort( - &mut self, - inputs: Vec, - bit_size: u32, - predicate: AcirVar, - ) -> Result, RuntimeError> { - let len = inputs.len(); - // Convert the inputs into expressions - let inputs_expr = try_vecmap(inputs, |input| self.var_to_expression(input))?; - // Generate output witnesses - let outputs_witness = vecmap(0..len, |_| self.acir_ir.next_witness_index()); - let output_expr = - vecmap(&outputs_witness, |witness_index| Expression::from(*witness_index)); - let outputs_var = vecmap(&outputs_witness, |witness_index| { - self.add_data(AcirVarData::Witness(*witness_index)) - }); - - // Enforce the outputs to be a permutation of the inputs - self.acir_ir.permutation(&inputs_expr, &output_expr)?; - - // Enforce the outputs to be sorted - for i in 0..(outputs_var.len() - 1) { - self.less_than_constrain(outputs_var[i], outputs_var[i + 1], bit_size, predicate)?; - } - - Ok(outputs_var) - } - - /// Constrain lhs to be less than rhs - fn less_than_constrain( - &mut self, - lhs: AcirVar, - rhs: AcirVar, - bit_size: u32, - predicate: AcirVar, - ) -> Result<(), RuntimeError> { - let lhs_less_than_rhs = self.more_than_eq_var(rhs, lhs, bit_size)?; - self.maybe_eq_predicate(lhs_less_than_rhs, predicate) - } - /// Returns a Variable that is constrained to be the result of reading /// from the memory `block_id` at the given `index`. pub(crate) fn read_from_memory( @@ -1828,42 +1839,31 @@ pub(crate) struct AcirVar(usize); /// Returns the finished state of the Brillig VM if execution can complete. 
/// /// Returns `None` if complete execution of the Brillig bytecode is not possible. -fn execute_brillig( - code: &[BrilligOpcode], - inputs: &[BrilligInputs], -) -> Option<(Registers, Vec)> { +fn execute_brillig(code: &[BrilligOpcode], inputs: &[BrilligInputs]) -> Option> { // Set input values - let mut input_register_values: Vec = Vec::with_capacity(inputs.len()); - let mut input_memory: Vec = Vec::new(); + let mut calldata: Vec = Vec::new(); + // Each input represents a constant or array of constants. // Iterate over each input and push it into registers and/or memory. for input in inputs { match input { BrilligInputs::Single(expr) => { - input_register_values.push(expr.to_const()?.into()); + calldata.push(expr.to_const()?.into()); } BrilligInputs::Array(expr_arr) => { // Attempt to fetch all array input values - let memory_pointer = input_memory.len(); for expr in expr_arr.iter() { - input_memory.push(expr.to_const()?.into()); + calldata.push(expr.to_const()?.into()); } - - // Push value of the array pointer as a register - input_register_values.push(Value::from(memory_pointer)); + } + BrilligInputs::MemoryArray(_) => { + return None; } } } // Instantiate a Brillig VM given the solved input registers and memory, along with the Brillig bytecode. - let input_registers = Registers::load(input_register_values); - let mut vm = VM::new( - input_registers, - input_memory, - code, - Vec::new(), - &blackbox_solver::StubbedBlackBoxSolver, - ); + let mut vm = VM::new(calldata, code, Vec::new(), &blackbox_solver::StubbedBlackBoxSolver); // Run the Brillig VM on these inputs, bytecode, etc! let vm_status = vm.process_opcodes(); @@ -1872,7 +1872,9 @@ fn execute_brillig( // It may be finished, in-progress, failed, or may be waiting for results of a foreign call. // If it's finished then we can omit the opcode and just write in the return values. match vm_status { - VMStatus::Finished => Some((vm.get_registers().clone(), vm.get_memory().to_vec())), + VMStatus::Finished { return_data_offset, return_data_size } => Some( + vm.get_memory()[return_data_offset..(return_data_offset + return_data_size)].to_vec(), + ), VMStatus::InProgress => unreachable!("Brillig VM has not completed execution"), VMStatus::Failure { .. } => { // TODO: Return an error stating that the brillig function failed. diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs index b86fc4eeb5f..1d05e998b13 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs @@ -74,6 +74,10 @@ impl GeneratedAcir { } } + pub(crate) fn opcodes(&self) -> &[AcirOpcode] { + &self.opcodes + } + pub(crate) fn take_opcodes(&mut self) -> Vec { std::mem::take(&mut self.opcodes) } @@ -252,7 +256,7 @@ impl GeneratedAcir { rhs: constant_inputs[1].to_u128() as u32, output: constant_outputs[0].to_u128() as u32, }, - BlackBoxFunc::BigIntNeg => BlackBoxFuncCall::BigIntNeg { + BlackBoxFunc::BigIntSub => BlackBoxFuncCall::BigIntSub { lhs: constant_inputs[0].to_u128() as u32, rhs: constant_inputs[1].to_u128() as u32, output: constant_outputs[0].to_u128() as u32, @@ -563,40 +567,6 @@ impl GeneratedAcir { } } - /// Generate gates and control bits witnesses which ensure that out_expr is a permutation of in_expr - /// Add the control bits of the sorting network used to generate the constrains - /// into the PermutationSort directive for solving in ACVM. 
- /// The directive is solving the control bits so that the outputs are sorted in increasing order. - /// - /// n.b. A sorting network is a predetermined set of switches, - /// the control bits indicate the configuration of each switch: false for pass-through and true for cross-over - pub(crate) fn permutation( - &mut self, - in_expr: &[Expression], - out_expr: &[Expression], - ) -> Result<(), RuntimeError> { - let mut bits_len = 0; - for i in 0..in_expr.len() { - bits_len += ((i + 1) as f32).log2().ceil() as u32; - } - - let bits = vecmap(0..bits_len, |_| self.next_witness_index()); - let inputs = in_expr.iter().map(|a| vec![a.clone()]).collect(); - self.push_opcode(AcirOpcode::Directive(Directive::PermutationSort { - inputs, - tuple: 1, - bits: bits.clone(), - sort_by: vec![0], - })); - let (_, b) = self.permutation_layer(in_expr, &bits, false)?; - - // Constrain the network output to out_expr - for (b, o) in b.iter().zip(out_expr) { - self.push_opcode(AcirOpcode::AssertZero(b - o)); - } - Ok(()) - } - pub(crate) fn last_acir_opcode_location(&self) -> OpcodeLocation { OpcodeLocation::Acir(self.opcodes.len() - 1) } @@ -646,7 +616,7 @@ fn black_box_func_expected_input_size(name: BlackBoxFunc) -> Option { // Big integer operations take in 0 inputs. They use constants for their inputs. BlackBoxFunc::BigIntAdd - | BlackBoxFunc::BigIntNeg + | BlackBoxFunc::BigIntSub | BlackBoxFunc::BigIntMul | BlackBoxFunc::BigIntDiv | BlackBoxFunc::BigIntToLeBytes => Some(0), @@ -696,7 +666,7 @@ fn black_box_expected_output_size(name: BlackBoxFunc) -> Option { // Big integer operations return a big integer BlackBoxFunc::BigIntAdd - | BlackBoxFunc::BigIntNeg + | BlackBoxFunc::BigIntSub | BlackBoxFunc::BigIntMul | BlackBoxFunc::BigIntDiv | BlackBoxFunc::BigIntFromLeBytes => Some(0), diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/sort.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/sort.rs deleted file mode 100644 index 52640d32337..00000000000 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/sort.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::errors::InternalError; - -use super::generated_acir::GeneratedAcir; -use acvm::acir::native_types::{Expression, Witness}; - -impl GeneratedAcir { - // Generates gates for a sorting network - // returns witness corresponding to the network configuration and the expressions corresponding to the network output - // in_expr: inputs of the sorting network - // if generate_witness is false, it uses the witness provided in bits instead of generating them - // in both cases it returns the witness of the network configuration - // if generate_witness is true, bits is ignored - pub(crate) fn permutation_layer( - &mut self, - in_expr: &[Expression], - bits: &[Witness], - generate_witness: bool, - ) -> Result<(Vec, Vec), InternalError> { - let n = in_expr.len(); - if n == 1 { - return Ok((Vec::new(), in_expr.to_vec())); - } - let n1 = n / 2; - - // witness for the input switches - let mut conf = iter_extended::vecmap(0..n1, |i| { - if generate_witness { - self.next_witness_index() - } else { - bits[i] - } - }); - - // compute expressions after the input switches - // If inputs are a1,a2, and the switch value is c, then we compute expressions b1,b2 where - // b1 = a1+q, b2 = a2-q, q = c(a2-a1) - let mut in_sub1 = Vec::new(); - let mut in_sub2 = Vec::new(); - for i in 0..n1 { - //q = c*(a2-a1); - let intermediate = self.mul_with_witness( - &Expression::from(conf[i]), - &(&in_expr[2 * i + 1] - &in_expr[2 * i]), - ); - //b1=a1+q - in_sub1.push(&intermediate + 
&in_expr[2 * i]); - //b2=a2-q - in_sub2.push(&in_expr[2 * i + 1] - &intermediate); - } - if n % 2 == 1 { - in_sub2.push(match in_expr.last() { - Some(in_expr) => in_expr.clone(), - None => { - return Err(InternalError::EmptyArray { call_stack: self.call_stack.clone() }) - } - }); - } - let mut out_expr = Vec::new(); - // compute results for the sub networks - let bits1 = if generate_witness { bits } else { &bits[n1 + (n - 1) / 2..] }; - let (w1, b1) = self.permutation_layer(&in_sub1, bits1, generate_witness)?; - let bits2 = if generate_witness { bits } else { &bits[n1 + (n - 1) / 2 + w1.len()..] }; - let (w2, b2) = self.permutation_layer(&in_sub2, bits2, generate_witness)?; - // apply the output switches - for i in 0..(n - 1) / 2 { - let c = if generate_witness { self.next_witness_index() } else { bits[n1 + i] }; - conf.push(c); - let intermediate = self.mul_with_witness(&Expression::from(c), &(&b2[i] - &b1[i])); - out_expr.push(&intermediate + &b1[i]); - out_expr.push(&b2[i] - &intermediate); - } - if n % 2 == 0 { - out_expr.push(match b1.last() { - Some(b1) => b1.clone(), - None => { - return Err(InternalError::EmptyArray { call_stack: self.call_stack.clone() }) - } - }); - } - out_expr.push(match b2.last() { - Some(b2) => b2.clone(), - None => return Err(InternalError::EmptyArray { call_stack: self.call_stack.clone() }), - }); - conf.extend(w1); - conf.extend(w2); - Ok((conf, out_expr)) - } -} diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs index 120c5bf25df..8d4d0668534 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs @@ -7,6 +7,7 @@ use std::fmt::Debug; use self::acir_ir::acir_variable::{AcirContext, AcirType, AcirVar}; use super::function_builder::data_bus::DataBus; use super::ir::dfg::CallStack; +use super::ir::instruction::ConstrainError; use super::{ ir::{ dfg::DataFlowGraph, @@ -98,6 +99,18 @@ pub(crate) struct AcirDynamicArray { block_id: BlockId, /// Length of the array len: usize, + /// An ACIR dynamic array is a flat structure, so we use + /// the inner structure of an `AcirType::NumericType` directly. + /// Some usages of ACIR arrays (e.g. black box functions) require the bit size + /// of every value to be known, thus we store the types as part of the dynamic + /// array definition. + /// + /// A dynamic non-homogenous array can potentially have values of differing types. + /// Thus, we store a vector of types rather than a single type, as a dynamic non-homogenous array + /// is still represented in ACIR by a single `AcirDynamicArray` structure. + /// + /// The length of the value types vector must match the `len` field in this structure. + value_types: Vec, /// Identification for the ACIR dynamic array /// inner element type sizes array element_type_sizes: Option, @@ -149,6 +162,16 @@ impl AcirValue { AcirValue::DynamicArray(_) => unimplemented!("Cannot flatten a dynamic array"), } } + + fn flat_numeric_types(self) -> Vec { + match self { + AcirValue::Array(_) => { + self.flatten().into_iter().map(|(_, typ)| typ.to_numeric_type()).collect() + } + AcirValue::DynamicArray(AcirDynamicArray { value_types, .. 
}) => value_types, + _ => unreachable!("An AcirValue::Var cannot be used as an array value"), + } + } } impl Ssa { @@ -269,6 +292,7 @@ impl Context { inputs, outputs, false, + true, )?; let output_vars: Vec<_> = output_values .iter() @@ -279,7 +303,16 @@ impl Context { for acir_var in output_vars { self.acir_context.return_var(acir_var)?; } - Ok(self.acir_context.finish(witness_inputs, Vec::new())) + + let generated_acir = self.acir_context.finish(witness_inputs, Vec::new()); + + assert_eq!( + generated_acir.opcodes().len(), + 1, + "Unconstrained programs should only generate a single opcode but multiple were emitted" + ); + + Ok(generated_acir) } /// Adds and binds `AcirVar`s for each numeric block parameter or block parameter array element. @@ -413,15 +446,94 @@ impl Context { let lhs = self.convert_numeric_value(*lhs, dfg)?; let rhs = self.convert_numeric_value(*rhs, dfg)?; - self.acir_context.assert_eq_var(lhs, rhs, assert_message.clone())?; + let assert_message = if let Some(error) = assert_message { + match error.as_ref() { + ConstrainError::Static(string) => Some(string.clone()), + ConstrainError::Dynamic(call_instruction) => { + self.convert_ssa_call(call_instruction, dfg, ssa, brillig, &[])?; + None + } + } + } else { + None + }; + + self.acir_context.assert_eq_var(lhs, rhs, assert_message)?; } Instruction::Cast(value_id, _) => { let acir_var = self.convert_numeric_value(*value_id, dfg)?; self.define_result_var(dfg, instruction_id, acir_var); } - Instruction::Call { func, arguments } => { + Instruction::Call { .. } => { let result_ids = dfg.instruction_results(instruction_id); - match &dfg[*func] { + warnings.extend(self.convert_ssa_call( + instruction, + dfg, + ssa, + brillig, + result_ids, + )?); + } + Instruction::Not(value_id) => { + let (acir_var, typ) = match self.convert_value(*value_id, dfg) { + AcirValue::Var(acir_var, typ) => (acir_var, typ), + _ => unreachable!("NOT is only applied to numerics"), + }; + let result_acir_var = self.acir_context.not_var(acir_var, typ)?; + self.define_result_var(dfg, instruction_id, result_acir_var); + } + Instruction::Truncate { value, bit_size, max_bit_size } => { + let result_acir_var = + self.convert_ssa_truncate(*value, *bit_size, *max_bit_size, dfg)?; + self.define_result_var(dfg, instruction_id, result_acir_var); + } + Instruction::EnableSideEffects { condition } => { + let acir_var = self.convert_numeric_value(*condition, dfg)?; + self.current_side_effects_enabled_var = acir_var; + } + Instruction::ArrayGet { .. } | Instruction::ArraySet { .. } => { + self.handle_array_operation(instruction_id, dfg, last_array_uses)?; + } + Instruction::Allocate => { + unreachable!("Expected all allocate instructions to be removed before acir_gen") + } + Instruction::Store { .. } => { + unreachable!("Expected all store instructions to be removed before acir_gen") + } + Instruction::Load { .. } => { + unreachable!("Expected all load instructions to be removed before acir_gen") + } + Instruction::IncrementRc { .. } => { + // Do nothing. 
Only Brillig needs to worry about reference counted arrays + } + Instruction::RangeCheck { value, max_bit_size, assert_message } => { + let acir_var = self.convert_numeric_value(*value, dfg)?; + self.acir_context.range_constrain_var( + acir_var, + &NumericType::Unsigned { bit_size: *max_bit_size }, + assert_message.clone(), + )?; + } + } + + self.acir_context.set_call_stack(CallStack::new()); + Ok(warnings) + } + + fn convert_ssa_call( + &mut self, + instruction: &Instruction, + dfg: &DataFlowGraph, + ssa: &Ssa, + brillig: &Brillig, + result_ids: &[ValueId], + ) -> Result, RuntimeError> { + let mut warnings = Vec::new(); + + match instruction { + Instruction::Call { func, arguments } => { + let function_value = &dfg[*func]; + match function_value { Value::Function(id) => { let func = &ssa.functions[id]; match func.runtime() { @@ -429,13 +541,21 @@ impl Context { "expected an intrinsic/brillig call, but found {func:?}. All ACIR methods should be inlined" ), RuntimeType::Brillig => { + // Check that we are not attempting to return a slice from + // an unconstrained runtime to a constrained runtime + for result_id in result_ids { + if dfg.type_of_value(*result_id).contains_slice_element() { + return Err(RuntimeError::UnconstrainedSliceReturnToConstrained { call_stack: self.acir_context.get_call_stack() }) + } + } + let inputs = vecmap(arguments, |arg| self.convert_value(*arg, dfg)); let code = self.gen_brillig_for(func, brillig)?; let outputs: Vec = vecmap(result_ids, |result_id| dfg.type_of_value(*result_id).into()); - let output_values = self.acir_context.brillig(self.current_side_effects_enabled_var, code, inputs, outputs, true)?; + let output_values = self.acir_context.brillig(self.current_side_effects_enabled_var, code, inputs, outputs, true, false)?; // Compiler sanity check assert_eq!(result_ids.len(), output_values.len(), "ICE: The number of Brillig output values should match the result ids in SSA"); @@ -495,51 +615,11 @@ impl Context { Value::ForeignFunction(_) => unreachable!( "All `oracle` methods should be wrapped in an unconstrained fn" ), - _ => unreachable!("expected calling a function"), + _ => unreachable!("expected calling a function but got {function_value:?}"), } } - Instruction::Not(value_id) => { - let (acir_var, typ) = match self.convert_value(*value_id, dfg) { - AcirValue::Var(acir_var, typ) => (acir_var, typ), - _ => unreachable!("NOT is only applied to numerics"), - }; - let result_acir_var = self.acir_context.not_var(acir_var, typ)?; - self.define_result_var(dfg, instruction_id, result_acir_var); - } - Instruction::Truncate { value, bit_size, max_bit_size } => { - let result_acir_var = - self.convert_ssa_truncate(*value, *bit_size, *max_bit_size, dfg)?; - self.define_result_var(dfg, instruction_id, result_acir_var); - } - Instruction::EnableSideEffects { condition } => { - let acir_var = self.convert_numeric_value(*condition, dfg)?; - self.current_side_effects_enabled_var = acir_var; - } - Instruction::ArrayGet { .. } | Instruction::ArraySet { .. } => { - self.handle_array_operation(instruction_id, dfg, last_array_uses)?; - } - Instruction::Allocate => { - unreachable!("Expected all allocate instructions to be removed before acir_gen") - } - Instruction::Store { .. } => { - unreachable!("Expected all store instructions to be removed before acir_gen") - } - Instruction::Load { .. } => { - unreachable!("Expected all load instructions to be removed before acir_gen") - } - Instruction::IncrementRc { .. } => { - // Do nothing. 
Only Brillig needs to worry about reference counted arrays - } - Instruction::RangeCheck { value, max_bit_size, assert_message } => { - let acir_var = self.convert_numeric_value(*value, dfg)?; - self.acir_context.range_constrain_var( - acir_var, - &NumericType::Unsigned { bit_size: *max_bit_size }, - assert_message.clone(), - )?; - } + _ => unreachable!("expected calling a call instruction"), } - self.acir_context.set_call_stack(CallStack::new()); Ok(warnings) } @@ -621,11 +701,11 @@ impl Context { instruction: InstructionId, dfg: &DataFlowGraph, index: ValueId, - array: ValueId, + array_id: ValueId, store_value: Option, ) -> Result { let index_const = dfg.get_numeric_constant(index); - let value_type = dfg.type_of_value(array); + let value_type = dfg.type_of_value(array_id); // Compiler sanity checks assert!( !value_type.is_nested_slice(), @@ -635,7 +715,7 @@ impl Context { unreachable!("ICE: expected array or slice type"); }; - match self.convert_value(array, dfg) { + match self.convert_value(array_id, dfg) { AcirValue::Var(acir_var, _) => { return Err(RuntimeError::InternalError(InternalError::Unexpected { expected: "an array value".to_string(), @@ -949,9 +1029,15 @@ impl Context { } else { None }; + + let value_types = self.convert_value(array_id, dfg).flat_numeric_types(); + // Compiler sanity check + assert_eq!(value_types.len(), array_len, "ICE: The length of the flattened type array should match the length of the dynamic array"); + let result_value = AcirValue::DynamicArray(AcirDynamicArray { block_id: result_block_id, len: array_len, + value_types, element_type_sizes, }); self.define_result(dfg, instruction, result_value); @@ -1035,7 +1121,7 @@ impl Context { &mut self, array_typ: &Type, array_id: ValueId, - array_acir_value: Option, + supplied_acir_value: Option<&AcirValue>, dfg: &DataFlowGraph, ) -> Result { let element_type_sizes = self.internal_block_id(&array_id); @@ -1061,26 +1147,23 @@ impl Context { Value::Instruction { .. } | Value::Param { .. } => { // An instruction representing the slice means it has been processed previously during ACIR gen. // Use the previously defined result of an array operation to fetch the internal type information. - let array_acir_value = if let Some(array_acir_value) = array_acir_value { - array_acir_value - } else { - self.convert_value(array_id, dfg) - }; + let array_acir_value = &self.convert_value(array_id, dfg); + let array_acir_value = supplied_acir_value.unwrap_or(array_acir_value); match array_acir_value { AcirValue::DynamicArray(AcirDynamicArray { element_type_sizes: inner_elem_type_sizes, .. }) => { if let Some(inner_elem_type_sizes) = inner_elem_type_sizes { - if self.initialized_arrays.contains(&inner_elem_type_sizes) { - let type_sizes_array_len = self.internal_mem_block_lengths.get(&inner_elem_type_sizes).copied().ok_or_else(|| + if self.initialized_arrays.contains(inner_elem_type_sizes) { + let type_sizes_array_len = *self.internal_mem_block_lengths.get(inner_elem_type_sizes).ok_or_else(|| InternalError::General { message: format!("Array {array_id}'s inner element type sizes array does not have a tracked length"), call_stack: self.acir_context.get_call_stack(), } )?; self.copy_dynamic_array( - inner_elem_type_sizes, + *inner_elem_type_sizes, element_type_sizes, type_sizes_array_len, )?; @@ -1286,7 +1369,7 @@ impl Context { // The return value may or may not be an array reference. Calling `flatten_value_list` // will expand the array if there is one. 
- let return_acir_vars = self.flatten_value_list(return_values, dfg); + let return_acir_vars = self.flatten_value_list(return_values, dfg)?; let mut warnings = Vec::new(); for acir_var in return_acir_vars { if self.acir_context.is_constant(&acir_var) { @@ -1328,7 +1411,13 @@ impl Context { AcirValue::Array(elements.collect()) } Value::Intrinsic(..) => todo!(), - Value::Function(..) => unreachable!("ICE: All functions should have been inlined"), + Value::Function(function_id) => { + // This conversion is for debugging support only, to allow the + // debugging instrumentation code to work. Taking the reference + // of a function in ACIR is useless. + let id = self.acir_context.add_constant(function_id.to_usize()); + AcirValue::Var(id, AcirType::field()) + } Value::ForeignFunction(_) => unimplemented!( "Oracle calls directly in constrained functions are not yet available." ), @@ -1419,6 +1508,9 @@ impl Context { bit_count, self.current_side_effects_enabled_var, ), + BinaryOp::Shl | BinaryOp::Shr => unreachable!( + "ICE - bit shift operators do not exist in ACIR and should have been replaced" + ), } } @@ -1563,33 +1655,6 @@ impl Context { self.acir_context.bit_decompose(endian, field, bit_size, result_type) } - Intrinsic::Sort => { - let inputs = vecmap(arguments, |arg| self.convert_value(*arg, dfg)); - // We flatten the inputs and retrieve the bit_size of the elements - let mut input_vars = Vec::new(); - let mut bit_size = 0; - for input in inputs { - for (var, typ) in input.flatten() { - input_vars.push(var); - if bit_size == 0 { - bit_size = typ.bit_size(); - } else { - assert_eq!( - bit_size, - typ.bit_size(), - "cannot sort element of different bit size" - ); - } - } - } - // Generate the sorted output variables - let out_vars = self - .acir_context - .sort(input_vars, bit_size, self.current_side_effects_enabled_var) - .expect("Could not sort"); - - Ok(self.convert_vars_to_values(out_vars, dfg, result_ids)) - } Intrinsic::ArrayLen => { let len = match self.convert_value(arguments[0], dfg) { AcirValue::Var(_, _) => unreachable!("Non-array passed to array.len() method"), @@ -1643,15 +1708,24 @@ impl Context { Some(self.init_element_type_sizes_array( &slice_typ, slice_contents, - Some(new_slice_val), + Some(&new_slice_val), dfg, )?) } else { None }; + + let value_types = new_slice_val.flat_numeric_types(); + assert_eq!( + value_types.len(), + new_elem_size, + "ICE: Value types array must match new slice size" + ); + let result = AcirValue::DynamicArray(AcirDynamicArray { block_id: result_block_id, len: new_elem_size, + value_types, element_type_sizes, }); Ok(vec![AcirValue::Var(new_slice_length, AcirType::field()), result]) @@ -1698,15 +1772,24 @@ impl Context { Some(self.init_element_type_sizes_array( &slice_typ, slice_contents, - Some(new_slice_val), + Some(&new_slice_val), dfg, )?) } else { None }; + + let value_types = new_slice_val.flat_numeric_types(); + assert_eq!( + value_types.len(), + new_slice_size, + "ICE: Value types array must match new slice size" + ); + let result = AcirValue::DynamicArray(AcirDynamicArray { block_id: result_block_id, len: new_slice_size, + value_types, element_type_sizes, }); @@ -1903,15 +1986,24 @@ impl Context { Some(self.init_element_type_sizes_array( &slice_typ, slice_contents, - Some(slice), + Some(&slice), dfg, )?) 
} else { None }; + + let value_types = slice.flat_numeric_types(); + assert_eq!( + value_types.len(), + slice_size, + "ICE: Value types array must match new slice size" + ); + let result = AcirValue::DynamicArray(AcirDynamicArray { block_id: result_block_id, len: slice_size, + value_types, element_type_sizes, }); @@ -2019,15 +2111,24 @@ impl Context { Some(self.init_element_type_sizes_array( &slice_typ, slice_contents, - Some(new_slice_val), + Some(&new_slice_val), dfg, )?) } else { None }; + + let value_types = new_slice_val.flat_numeric_types(); + assert_eq!( + value_types.len(), + slice_size, + "ICE: Value types array must match new slice size" + ); + let result = AcirValue::DynamicArray(AcirDynamicArray { block_id: result_block_id, len: slice_size, + value_types, element_type_sizes, }); @@ -2089,13 +2190,19 @@ impl Context { /// Maps an ssa value list, for which some values may be references to arrays, by inlining /// the `AcirVar`s corresponding to the contents of each array into the list of `AcirVar`s /// that correspond to other values. - fn flatten_value_list(&mut self, arguments: &[ValueId], dfg: &DataFlowGraph) -> Vec { + fn flatten_value_list( + &mut self, + arguments: &[ValueId], + dfg: &DataFlowGraph, + ) -> Result, InternalError> { let mut acir_vars = Vec::with_capacity(arguments.len()); for value_id in arguments { let value = self.convert_value(*value_id, dfg); - AcirContext::flatten_value(&mut acir_vars, value); + acir_vars.append( + &mut self.acir_context.flatten(value)?.iter().map(|(var, _)| *var).collect(), + ); } - acir_vars + Ok(acir_vars) } /// Convert a Vec into a Vec using the given result ids. diff --git a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs index e7e8f00bd42..bf34a47485b 100644 --- a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs @@ -18,8 +18,7 @@ use super::{ basic_block::BasicBlock, dfg::{CallStack, InsertInstructionResult}, function::RuntimeType, - instruction::{Endian, InstructionId, Intrinsic}, - types::NumericType, + instruction::{ConstrainError, InstructionId, Intrinsic}, }, ssa_gen::Ssa, }; @@ -116,6 +115,11 @@ impl FunctionBuilder { self.numeric_constant(value.into(), Type::field()) } + /// Insert a numeric constant into the current function of type Type::length_type() + pub(crate) fn length_constant(&mut self, value: impl Into) -> ValueId { + self.numeric_constant(value.into(), Type::length_type()) + } + /// Insert an array constant into the current function with the given element values. 
pub(crate) fn array_constant(&mut self, elements: im::Vector, typ: Type) -> ValueId { self.current_function.dfg.make_array(elements, typ) @@ -217,6 +221,11 @@ impl FunctionBuilder { operator: BinaryOp, rhs: ValueId, ) -> ValueId { + assert_eq!( + self.type_of_value(lhs), + self.type_of_value(rhs), + "ICE - Binary instruction operands must have the same type" + ); let instruction = Instruction::Binary(Binary { lhs, rhs, operator }); self.insert_instruction(instruction, None).first() } @@ -250,7 +259,7 @@ impl FunctionBuilder { &mut self, lhs: ValueId, rhs: ValueId, - assert_message: Option, + assert_message: Option>, ) { self.insert_instruction(Instruction::Constrain(lhs, rhs, assert_message), None); } @@ -279,108 +288,6 @@ impl FunctionBuilder { self.insert_instruction(Instruction::Call { func, arguments }, Some(result_types)).results() } - /// Insert ssa instructions which computes lhs << rhs by doing lhs*2^rhs - /// and truncate the result to bit_size - pub(crate) fn insert_wrapping_shift_left( - &mut self, - lhs: ValueId, - rhs: ValueId, - bit_size: u32, - ) -> ValueId { - let base = self.field_constant(FieldElement::from(2_u128)); - let typ = self.current_function.dfg.type_of_value(lhs); - let (max_bit, pow) = - if let Some(rhs_constant) = self.current_function.dfg.get_numeric_constant(rhs) { - // Happy case is that we know precisely by how many bits the the integer will - // increase: lhs_bit_size + rhs - let bit_shift_size = rhs_constant.to_u128() as u32; - - let (rhs_bit_size_pow_2, overflows) = 2_u128.overflowing_pow(bit_shift_size); - if overflows { - assert!(bit_size < 128, "ICE - shift left with big integers are not supported"); - if bit_size < 128 { - let zero = self.numeric_constant(FieldElement::zero(), typ); - return InsertInstructionResult::SimplifiedTo(zero).first(); - } - } - let pow = self.numeric_constant(FieldElement::from(rhs_bit_size_pow_2), typ); - - let max_lhs_bits = self.current_function.dfg.get_value_max_num_bits(lhs); - - (max_lhs_bits + bit_shift_size, pow) - } else { - // we use a predicate to nullify the result in case of overflow - let bit_size_var = - self.numeric_constant(FieldElement::from(bit_size as u128), typ.clone()); - let overflow = self.insert_binary(rhs, BinaryOp::Lt, bit_size_var); - let predicate = self.insert_cast(overflow, typ.clone()); - // we can safely cast to unsigned because overflow_checks prevent bit-shift with a negative value - let rhs_unsigned = self.insert_cast(rhs, Type::unsigned(bit_size)); - let pow = self.pow(base, rhs_unsigned); - let pow = self.insert_cast(pow, typ); - (FieldElement::max_num_bits(), self.insert_binary(predicate, BinaryOp::Mul, pow)) - }; - - if max_bit <= bit_size { - self.insert_binary(lhs, BinaryOp::Mul, pow) - } else { - let result = self.insert_binary(lhs, BinaryOp::Mul, pow); - self.insert_truncate(result, bit_size, max_bit) - } - } - - /// Insert ssa instructions which computes lhs >> rhs by doing lhs/2^rhs - pub(crate) fn insert_shift_right( - &mut self, - lhs: ValueId, - rhs: ValueId, - bit_size: u32, - ) -> ValueId { - let base = self.field_constant(FieldElement::from(2_u128)); - // we can safely cast to unsigned because overflow_checks prevent bit-shift with a negative value - let rhs_unsigned = self.insert_cast(rhs, Type::unsigned(bit_size)); - let pow = self.pow(base, rhs_unsigned); - self.insert_binary(lhs, BinaryOp::Div, pow) - } - - /// Computes lhs^rhs via square&multiply, using the bits decomposition of rhs - /// Pseudo-code of the computation: - /// let mut r = 1; - /// let rhs_bits = 
to_bits(rhs); - /// for i in 1 .. bit_size + 1 { - /// let r_squared = r * r; - /// let b = rhs_bits[bit_size - i]; - /// r = (r_squared * lhs * b) + (1 - b) * r_squared; - /// } - pub(crate) fn pow(&mut self, lhs: ValueId, rhs: ValueId) -> ValueId { - let typ = self.current_function.dfg.type_of_value(rhs); - if let Type::Numeric(NumericType::Unsigned { bit_size }) = typ { - let to_bits = self.import_intrinsic_id(Intrinsic::ToBits(Endian::Little)); - let length = self.field_constant(FieldElement::from(bit_size as i128)); - let result_types = - vec![Type::field(), Type::Array(Rc::new(vec![Type::bool()]), bit_size as usize)]; - let rhs_bits = self.insert_call(to_bits, vec![rhs, length], result_types); - let rhs_bits = rhs_bits[1]; - let one = self.field_constant(FieldElement::one()); - let mut r = one; - for i in 1..bit_size + 1 { - let r_squared = self.insert_binary(r, BinaryOp::Mul, r); - let a = self.insert_binary(r_squared, BinaryOp::Mul, lhs); - let idx = self.field_constant(FieldElement::from((bit_size - i) as i128)); - let b = self.insert_array_get(rhs_bits, idx, Type::bool()); - let not_b = self.insert_not(b); - let b = self.insert_cast(b, Type::field()); - let not_b = self.insert_cast(not_b, Type::field()); - let r1 = self.insert_binary(a, BinaryOp::Mul, b); - let r2 = self.insert_binary(r_squared, BinaryOp::Mul, not_b); - r = self.insert_binary(r1, BinaryOp::Add, r2); - } - r - } else { - unreachable!("Value must be unsigned in power operation"); - } - } - /// Insert an instruction to extract an element from an array pub(crate) fn insert_array_get( &mut self, diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index 331a02a6974..0b6c7074e45 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -36,7 +36,6 @@ pub(crate) type InstructionId = Id; /// of this is println. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub(crate) enum Intrinsic { - Sort, ArrayLen, AssertConstant, SlicePushBack, @@ -57,7 +56,6 @@ pub(crate) enum Intrinsic { impl std::fmt::Display for Intrinsic { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Intrinsic::Sort => write!(f, "arraysort"), Intrinsic::ArrayLen => write!(f, "array_len"), Intrinsic::AssertConstant => write!(f, "assert_constant"), Intrinsic::SlicePushBack => write!(f, "slice_push_back"), @@ -90,8 +88,7 @@ impl Intrinsic { // These apply a constraint that the input must fit into a specified number of limbs. Intrinsic::ToBits(_) | Intrinsic::ToRadix(_) => true, - Intrinsic::Sort - | Intrinsic::ArrayLen + Intrinsic::ArrayLen | Intrinsic::SlicePushBack | Intrinsic::SlicePushFront | Intrinsic::SlicePopBack @@ -111,7 +108,6 @@ impl Intrinsic { /// If there is no such intrinsic by that name, None is returned. pub(crate) fn lookup(name: &str) -> Option { match name { - "arraysort" => Some(Intrinsic::Sort), "array_len" => Some(Intrinsic::ArrayLen), "assert_constant" => Some(Intrinsic::AssertConstant), "apply_range_constraint" => Some(Intrinsic::ApplyRangeConstraint), @@ -157,7 +153,7 @@ pub(crate) enum Instruction { Truncate { value: ValueId, bit_size: u32, max_bit_size: u32 }, /// Constrains two values to be equal to one another. 
- Constrain(ValueId, ValueId, Option), + Constrain(ValueId, ValueId, Option>), /// Range constrain `value` to `max_bit_size` RangeCheck { value: ValueId, max_bit_size: u32, assert_message: Option }, @@ -326,7 +322,17 @@ impl Instruction { max_bit_size: *max_bit_size, }, Instruction::Constrain(lhs, rhs, assert_message) => { - Instruction::Constrain(f(*lhs), f(*rhs), assert_message.clone()) + // Must map the `lhs` and `rhs` first as the value `f` is moved with the closure + let lhs = f(*lhs); + let rhs = f(*rhs); + let assert_message = assert_message.as_ref().map(|error| match error.as_ref() { + ConstrainError::Dynamic(call_instr) => { + let new_instr = call_instr.map_values(f); + Box::new(ConstrainError::Dynamic(new_instr)) + } + _ => error.clone(), + }); + Instruction::Constrain(lhs, rhs, assert_message) } Instruction::Call { func, arguments } => Instruction::Call { func: f(*func), @@ -376,9 +382,14 @@ impl Instruction { | Instruction::Load { address: value } => { f(*value); } - Instruction::Constrain(lhs, rhs, _) => { + Instruction::Constrain(lhs, rhs, assert_error) => { f(*lhs); f(*rhs); + if let Some(error) = assert_error.as_ref() { + if let ConstrainError::Dynamic(call_instr) = error.as_ref() { + call_instr.for_each_value(f); + } + } } Instruction::Store { address, value } => { @@ -425,9 +436,10 @@ impl Instruction { // Limit optimizing ! on constants to only booleans. If we tried it on fields, // there is no Not on FieldElement, so we'd need to convert between u128. This // would be incorrect however since the extra bits on the field would not be flipped. - Value::NumericConstant { constant, typ } if *typ == Type::bool() => { - let value = constant.is_zero() as u128; - SimplifiedTo(dfg.make_constant(value.into(), Type::bool())) + Value::NumericConstant { constant, typ } if typ.is_unsigned() => { + // As we're casting to a `u128`, we need to clear out any upper bits that the NOT fills. + let value = !constant.to_u128() % (1 << typ.bit_size()); + SimplifiedTo(dfg.make_constant(value.into(), typ.clone())) } Value::Instruction { instruction, .. } => { // !!v => v @@ -441,7 +453,7 @@ impl Instruction { } } Instruction::Constrain(lhs, rhs, msg) => { - let constraints = decompose_constrain(*lhs, *rhs, msg.clone(), dfg); + let constraints = decompose_constrain(*lhs, *rhs, msg, dfg); if constraints.is_empty() { Remove } else { @@ -475,6 +487,9 @@ impl Instruction { None } Instruction::Truncate { value, bit_size, max_bit_size } => { + if bit_size == max_bit_size { + return SimplifiedTo(*value); + } if let Some((numeric_constant, typ)) = dfg.get_numeric_constant_with_type(*value) { let integer_modulus = 2_u128.pow(*bit_size); let truncated = numeric_constant.to_u128() % integer_modulus; @@ -551,6 +566,28 @@ impl Instruction { } } +#[derive(Debug, PartialEq, Eq, Hash, Clone)] +pub(crate) enum ConstrainError { + // These are errors which have been hardcoded during SSA gen + Static(String), + // These are errors which come from runtime expressions specified by a Noir program + // We store an `Instruction` as we want this Instruction to be atomic in SSA with + // a constrain instruction, and leave codegen of this instruction to lower level passes. 
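The reworked `Not` simplification above flips the constant inside a `u128` and then reduces modulo `2^bit_size`, so the bits that the complement sets above the operand's width are discarded. A self-contained check of that arithmetic in plain Rust (not compiler code):

```rust
// Mirrors `!constant.to_u128() % (1 << typ.bit_size())`: complement in u128,
// then keep only the low `bit_size` bits of the result.
fn simplify_not(constant: u128, bit_size: u32) -> u128 {
    !constant % (1u128 << bit_size)
}

fn main() {
    // NOT of a u8-typed constant: only the low 8 bits end up flipped.
    assert_eq!(simplify_not(0b1010_1100, 8), 0b0101_0011);
    // NOT of 0 at 16 bits is the all-ones 16-bit value.
    assert_eq!(simplify_not(0, 16), u16::MAX as u128);
}
```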
+ Dynamic(Instruction), +} + +impl From for ConstrainError { + fn from(value: String) -> Self { + ConstrainError::Static(value) + } +} + +impl From for Box { + fn from(value: String) -> Self { + Box::new(value.into()) + } +} + /// The possible return values for Instruction::return_types pub(crate) enum InstructionResultType { /// The result type of this instruction matches that of this operand diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/binary.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/binary.rs index 1cb32d94148..36f3ae8620b 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/binary.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/binary.rs @@ -38,6 +38,10 @@ pub(crate) enum BinaryOp { Or, /// Bitwise xor (^) Xor, + /// Bitshift left (<<) + Shl, + /// Bitshift right (>>) + Shr, } impl std::fmt::Display for BinaryOp { @@ -53,6 +57,8 @@ impl std::fmt::Display for BinaryOp { BinaryOp::And => write!(f, "and"), BinaryOp::Or => write!(f, "or"), BinaryOp::Xor => write!(f, "xor"), + BinaryOp::Shl => write!(f, "shl"), + BinaryOp::Shr => write!(f, "shr"), } } } @@ -135,6 +141,23 @@ impl Binary { let zero = dfg.make_constant(FieldElement::zero(), operand_type); return SimplifyResult::SimplifiedTo(zero); } + if operand_type.is_unsigned() { + // lhs % 2**bit_size is equivalent to truncating `lhs` to `bit_size` bits. + // We then convert to a truncation for consistency, allowing more optimizations. + if let Some(modulus) = rhs { + let modulus = modulus.to_u128(); + if modulus.is_power_of_two() { + let bit_size = modulus.ilog2(); + return SimplifyResult::SimplifiedToInstruction( + Instruction::Truncate { + value: self.lhs, + bit_size, + max_bit_size: operand_type.bit_size(), + }, + ); + } + } + } } BinaryOp::Eq => { if dfg.resolve(self.lhs) == dfg.resolve(self.rhs) { @@ -191,6 +214,32 @@ impl Binary { let instruction = Instruction::binary(BinaryOp::Mul, self.lhs, self.rhs); return SimplifyResult::SimplifiedToInstruction(instruction); } + if operand_type.is_unsigned() { + // It's common in other programming languages to truncate values to a certain bit size using + // a bitwise AND with a bit mask. However this operation is quite inefficient inside a snark. + // + // We then replace this bitwise operation with an equivalent truncation instruction. + match (lhs, rhs) { + (Some(bitmask), None) | (None, Some(bitmask)) => { + // This substitution requires the bitmask to retain all of the lower bits. + // The bitmask must then be one less than a power of 2. + let bitmask_plus_one = bitmask.to_u128() + 1; + if bitmask_plus_one.is_power_of_two() { + let value = if lhs.is_some() { self.rhs } else { self.lhs }; + let num_bits = bitmask_plus_one.ilog2(); + return SimplifyResult::SimplifiedToInstruction( + Instruction::Truncate { + value, + bit_size: num_bits, + max_bit_size: operand_type.bit_size(), + }, + ); + } + } + + _ => (), + } + } } BinaryOp::Or => { if lhs_is_zero { @@ -215,7 +264,27 @@ impl Binary { return SimplifyResult::SimplifiedTo(zero); } } - } + BinaryOp::Shl => return SimplifyResult::None, + BinaryOp::Shr => { + // Bit shifts by constants can be treated as divisions. + if let Some(rhs_const) = rhs { + if rhs_const >= FieldElement::from(operand_type.bit_size() as u128) { + // Shifting by the full width of the operand type, any `lhs` goes to zero. 
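Both of the new unsigned rewrites rely on elementary identities: `x & (2^n - 1)` keeps exactly the low `n` bits (the same as `x mod 2^n`, hence a truncation), and for unsigned `x` a constant shift `x >> c` equals `x / 2^c`. A quick standalone sanity check of those identities:

```rust
fn main() {
    let x: u32 = 0xABCD_1234;

    // Masking with 2^n - 1 keeps the low n bits, i.e. reduces modulo 2^n,
    // which is why the AND-with-bitmask can be replaced by a truncation to n bits.
    let n = 8;
    let bitmask: u32 = (1 << n) - 1;
    assert!(((bitmask as u64) + 1).is_power_of_two());
    assert_eq!(x & bitmask, x % (1 << n));

    // A right shift by a constant c is a division by 2^c for unsigned values,
    // so `Shr` with a constant rhs can become a `Div` by a power of two.
    let c = 5;
    assert_eq!(x >> c, x / 2u32.pow(c));

    // Shifting by the operand's full bit width (or more) leaves nothing,
    // which is the case simplified directly to zero.
    assert_eq!((x as u64) >> 32, 0);
}
```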
+ let zero = dfg.make_constant(FieldElement::zero(), operand_type); + return SimplifyResult::SimplifiedTo(zero); + } + + // `two_pow_rhs` is limited to be at most `2 ^ {operand_bitsize - 1}` so it fits in `operand_type`. + let two_pow_rhs = FieldElement::from(2u128).pow(&rhs_const); + let two_pow_rhs = dfg.make_constant(two_pow_rhs, operand_type); + return SimplifyResult::SimplifiedToInstruction(Instruction::binary( + BinaryOp::Div, + self.lhs, + two_pow_rhs, + )); + } + } + }; SimplifyResult::None } } @@ -314,6 +383,8 @@ impl BinaryOp { BinaryOp::And => None, BinaryOp::Or => None, BinaryOp::Xor => None, + BinaryOp::Shl => None, + BinaryOp::Shr => None, } } @@ -329,6 +400,8 @@ impl BinaryOp { BinaryOp::Xor => |x, y| Some(x ^ y), BinaryOp::Eq => |x, y| Some((x == y) as u128), BinaryOp::Lt => |x, y| Some((x < y) as u128), + BinaryOp::Shl => |x, y| Some(x << y), + BinaryOp::Shr => |x, y| Some(x >> y), } } @@ -344,6 +417,8 @@ impl BinaryOp { BinaryOp::Xor => |x, y| Some(x ^ y), BinaryOp::Eq => |x, y| Some((x == y) as i128), BinaryOp::Lt => |x, y| Some((x < y) as i128), + BinaryOp::Shl => |x, y| Some(x << y), + BinaryOp::Shr => |x, y| Some(x >> y), } } } diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs index 0178ae9dba1..9349d58c4d9 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs @@ -1,3 +1,4 @@ +use fxhash::FxHashMap as HashMap; use std::{collections::VecDeque, rc::Rc}; use acvm::{acir::BlackBoxFunc, BlackBoxResolutionError, FieldElement}; @@ -76,7 +77,7 @@ pub(super) fn simplify_call( Intrinsic::ArrayLen => { if let Some(length) = dfg.try_get_array_length(arguments[0]) { let length = FieldElement::from(length as u128); - SimplifyResult::SimplifiedTo(dfg.make_constant(length, Type::field())) + SimplifyResult::SimplifiedTo(dfg.make_constant(length, Type::length_type())) } else if matches!(dfg.type_of_value(arguments[1]), Type::Slice(_)) { SimplifyResult::SimplifiedTo(arguments[0]) } else { @@ -238,7 +239,6 @@ pub(super) fn simplify_call( } } Intrinsic::BlackBox(bb_func) => simplify_black_box_func(bb_func, arguments, dfg), - Intrinsic::Sort => simplify_sort(dfg, arguments), Intrinsic::AsField => { let instruction = Instruction::Cast( arguments[0], @@ -283,7 +283,7 @@ fn update_slice_length( operator: BinaryOp, block: BasicBlockId, ) -> ValueId { - let one = dfg.make_constant(FieldElement::one(), Type::field()); + let one = dfg.make_constant(FieldElement::one(), Type::length_type()); let instruction = Instruction::Binary(Binary { lhs: slice_len, operator, rhs: one }); let call_stack = dfg.get_value_call_stack(slice_len); dfg.insert_instruction_and_results(instruction, block, None, call_stack).first() @@ -296,8 +296,8 @@ fn simplify_slice_push_back( dfg: &mut DataFlowGraph, block: BasicBlockId, ) -> SimplifyResult { - // The capacity must be an integer so that we can compare it against the slice length which is represented as a field - let capacity = dfg.make_constant((slice.len() as u128).into(), Type::unsigned(64)); + // The capacity must be an integer so that we can compare it against the slice length + let capacity = dfg.make_constant((slice.len() as u128).into(), Type::length_type()); let len_equals_capacity_instr = Instruction::Binary(Binary { lhs: arguments[0], operator: BinaryOp::Eq, rhs: capacity }); let call_stack = dfg.get_value_call_stack(arguments[0]); @@ -319,6 +319,8 @@ fn simplify_slice_push_back( for elem in 
&arguments[2..] { slice.push_back(*elem); } + let slice_size = slice.len(); + let element_size = element_type.element_size(); let new_slice = dfg.make_array(slice, element_type); let set_last_slice_value_instr = @@ -327,7 +329,11 @@ fn simplify_slice_push_back( .insert_instruction_and_results(set_last_slice_value_instr, block, None, call_stack) .first(); - let mut value_merger = ValueMerger::new(dfg, block, None, None); + let mut slice_sizes = HashMap::default(); + slice_sizes.insert(set_last_slice_value, slice_size / element_size); + slice_sizes.insert(new_slice, slice_size / element_size); + + let mut value_merger = ValueMerger::new(dfg, block, &mut slice_sizes); let new_slice = value_merger.merge_values( len_not_equals_capacity, len_equals_capacity, @@ -356,7 +362,7 @@ fn simplify_slice_pop_back( let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Sub, block); - let element_size = dfg.make_constant((element_count as u128).into(), Type::field()); + let element_size = dfg.make_constant((element_count as u128).into(), Type::length_type()); let flattened_len_instr = Instruction::binary(BinaryOp::Mul, arguments[0], element_size); let mut flattened_len = dfg .insert_instruction_and_results(flattened_len_instr, block, None, CallStack::new()) @@ -434,7 +440,7 @@ fn simplify_black_box_func( SimplifyResult::None } BlackBoxFunc::BigIntAdd - | BlackBoxFunc::BigIntNeg + | BlackBoxFunc::BigIntSub | BlackBoxFunc::BigIntMul | BlackBoxFunc::BigIntDiv | BlackBoxFunc::RecursiveAggregation @@ -472,7 +478,7 @@ fn make_constant_slice( let typ = Type::Slice(Rc::new(vec![typ])); let length = FieldElement::from(result_constants.len() as u128); - (dfg.make_constant(length, Type::field()), dfg.make_array(result_constants.into(), typ)) + (dfg.make_constant(length, Type::length_type()), dfg.make_array(result_constants.into(), typ)) } /// Returns a slice (represented by a tuple (len, slice)) of constants corresponding to the limbs of the radix decomposition. @@ -584,20 +590,3 @@ fn simplify_signature( _ => SimplifyResult::None, } } - -fn simplify_sort(dfg: &mut DataFlowGraph, arguments: &[ValueId]) -> SimplifyResult { - match dfg.get_array_constant(arguments[0]) { - Some((input, _)) => { - let inputs: Option> = - input.iter().map(|id| dfg.get_numeric_constant(*id)).collect(); - - let Some(mut sorted_inputs) = inputs else { return SimplifyResult::None }; - sorted_inputs.sort_unstable(); - - let (_, element_type) = dfg.get_numeric_constant_with_type(input[0]).unwrap(); - let result_array = make_constant_array(dfg, sorted_inputs, element_type); - SimplifyResult::SimplifiedTo(result_array) - } - _ => SimplifyResult::None, - } -} diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/constrain.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/constrain.rs index 7fb0970c834..b4198e2cfec 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/constrain.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/constrain.rs @@ -1,13 +1,13 @@ use acvm::FieldElement; -use super::{Binary, BinaryOp, DataFlowGraph, Instruction, Type, Value, ValueId}; +use super::{Binary, BinaryOp, ConstrainError, DataFlowGraph, Instruction, Type, Value, ValueId}; /// Try to decompose this constrain instruction. This constraint will be broken down such that it instead constrains /// all the values which are used to compute the values which were being constrained. 
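For orientation, `simplify_slice_push_back` above chooses between two outcomes depending on whether the slice's runtime length has already reached the known capacity of its backing array: grow the backing array, or overwrite the first unused slot, with the two results merged on the `len == capacity` flag. A rough sketch of that idea with ordinary vectors (hypothetical helper, not the compiler's code):

```rust
// If the slice is already at capacity the pushed element must extend the backing
// array (the `len == capacity` branch); otherwise it can be written over the
// first unused slot (the array_set branch).
fn push_back(len: usize, capacity: usize, backing: &mut Vec<u64>, elem: u64) -> usize {
    if len == capacity {
        backing.push(elem);
    } else {
        backing[len] = elem;
    }
    len + 1
}

fn main() {
    let mut backing = vec![1, 2, 0]; // capacity 3, only 2 slots in use
    assert_eq!(push_back(2, 3, &mut backing, 9), 3);
    assert_eq!(backing, vec![1, 2, 9]);
}
```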
pub(super) fn decompose_constrain( lhs: ValueId, rhs: ValueId, - msg: Option, + msg: &Option>, dfg: &mut DataFlowGraph, ) -> Vec { let lhs = dfg.resolve(lhs); @@ -39,7 +39,7 @@ pub(super) fn decompose_constrain( // Note that this doesn't remove the value `v2` as it may be used in other instructions, but it // will likely be removed through dead instruction elimination. - vec![Instruction::Constrain(lhs, rhs, msg)] + vec![Instruction::Constrain(lhs, rhs, msg.clone())] } Instruction::Binary(Binary { lhs, rhs, operator: BinaryOp::Mul }) @@ -64,7 +64,7 @@ pub(super) fn decompose_constrain( let one = dfg.make_constant(one, Type::bool()); [ - decompose_constrain(lhs, one, msg.clone(), dfg), + decompose_constrain(lhs, one, msg, dfg), decompose_constrain(rhs, one, msg, dfg), ] .concat() @@ -92,7 +92,7 @@ pub(super) fn decompose_constrain( let zero = dfg.make_constant(zero, dfg.type_of_value(lhs)); [ - decompose_constrain(lhs, zero, msg.clone(), dfg), + decompose_constrain(lhs, zero, msg, dfg), decompose_constrain(rhs, zero, msg, dfg), ] .concat() @@ -116,11 +116,28 @@ pub(super) fn decompose_constrain( decompose_constrain(value, reversed_constant, msg, dfg) } - _ => vec![Instruction::Constrain(lhs, rhs, msg)], + _ => vec![Instruction::Constrain(lhs, rhs, msg.clone())], } } - _ => vec![Instruction::Constrain(lhs, rhs, msg)], + ( + Value::Instruction { instruction: instruction_lhs, .. }, + Value::Instruction { instruction: instruction_rhs, .. }, + ) => { + match (&dfg[*instruction_lhs], &dfg[*instruction_rhs]) { + // Casting two values just to enforce an equality on them. + // + // This is equivalent to enforcing equality on the original values. + (Instruction::Cast(original_lhs, _), Instruction::Cast(original_rhs, _)) + if dfg.type_of_value(*original_lhs) == dfg.type_of_value(*original_rhs) => + { + vec![Instruction::Constrain(*original_lhs, *original_rhs, msg.clone())] + } + + _ => vec![Instruction::Constrain(lhs, rhs, msg.clone())], + } + } + _ => vec![Instruction::Constrain(lhs, rhs, msg.clone())], } } } diff --git a/compiler/noirc_evaluator/src/ssa/ir/printer.rs b/compiler/noirc_evaluator/src/ssa/ir/printer.rs index 2899b987c1d..9bd43fab1ff 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/printer.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/printer.rs @@ -9,7 +9,7 @@ use iter_extended::vecmap; use super::{ basic_block::BasicBlockId, function::Function, - instruction::{Instruction, InstructionId, TerminatorInstruction}, + instruction::{ConstrainError, Instruction, InstructionId, TerminatorInstruction}, value::ValueId, }; @@ -133,9 +133,17 @@ pub(crate) fn display_instruction( write!(f, "{} = ", value_list(function, results))?; } + display_instruction_inner(function, &function.dfg[instruction], f) +} + +fn display_instruction_inner( + function: &Function, + instruction: &Instruction, + f: &mut Formatter, +) -> Result { let show = |id| value(function, id); - match &function.dfg[instruction] { + match instruction { Instruction::Binary(binary) => { writeln!(f, "{} {}, {}", binary.operator, show(binary.lhs), show(binary.rhs)) } @@ -145,10 +153,15 @@ pub(crate) fn display_instruction( let value = show(*value); writeln!(f, "truncate {value} to {bit_size} bits, max_bit_size: {max_bit_size}",) } - Instruction::Constrain(lhs, rhs, message) => match message { - Some(message) => writeln!(f, "constrain {} == {} '{message}'", show(*lhs), show(*rhs)), - None => writeln!(f, "constrain {} == {}", show(*lhs), show(*rhs)), - }, + Instruction::Constrain(lhs, rhs, error) => { + write!(f, "constrain {} == {}", 
show(*lhs), show(*rhs))?; + if let Some(error) = error { + write!(f, " ")?; + display_constrain_error(function, error, f) + } else { + writeln!(f) + } + } Instruction::Call { func, arguments } => { writeln!(f, "call {}({})", show(*func), value_list(function, arguments)) } @@ -180,3 +193,18 @@ pub(crate) fn display_instruction( } } } + +fn display_constrain_error( + function: &Function, + error: &ConstrainError, + f: &mut Formatter, +) -> Result { + match error { + ConstrainError::Static(assert_message_string) => { + writeln!(f, "{assert_message_string:?}") + } + ConstrainError::Dynamic(assert_message_call) => { + display_instruction_inner(function, assert_message_call, f) + } + } +} diff --git a/compiler/noirc_evaluator/src/ssa/ir/types.rs b/compiler/noirc_evaluator/src/ssa/ir/types.rs index f412def1e76..ea3f5393245 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/types.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/types.rs @@ -90,6 +90,11 @@ impl Type { Type::Numeric(NumericType::NativeField) } + /// Creates the type of an array's length. + pub(crate) fn length_type() -> Type { + Type::unsigned(64) + } + /// Returns the bit size of the provided numeric type. /// /// # Panics @@ -120,7 +125,7 @@ impl Type { } Type::Slice(_) => true, Type::Numeric(_) => false, - Type::Reference(_) => false, + Type::Reference(element) => element.contains_slice_element(), Type::Function => false, } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/bubble_up_constrains.rs b/compiler/noirc_evaluator/src/ssa/opt/bubble_up_constrains.rs index 8a903cbd87b..b737c51d145 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/bubble_up_constrains.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/bubble_up_constrains.rs @@ -109,11 +109,11 @@ mod test { let v1 = builder.insert_binary(v0, BinaryOp::Add, one); let v2 = builder.insert_binary(v1, BinaryOp::Add, one); - builder.insert_constrain(v0, one, Some("With message".to_string())); + builder.insert_constrain(v0, one, Some("With message".to_string().into())); builder.insert_constrain(v2, three, None); builder.insert_constrain(v0, one, None); builder.insert_constrain(v1, two, None); - builder.insert_constrain(v1, two, Some("With message".to_string())); + builder.insert_constrain(v1, two, Some("With message".to_string().into())); builder.terminate_with_return(vec![]); let ssa = builder.finish(); @@ -137,11 +137,11 @@ mod test { assert_eq!(block.instructions().len(), 7); let expected_instructions = vec![ - Instruction::Constrain(v0, one, Some("With message".to_string())), + Instruction::Constrain(v0, one, Some("With message".to_string().into())), Instruction::Constrain(v0, one, None), Instruction::Binary(Binary { lhs: v0, rhs: one, operator: BinaryOp::Add }), Instruction::Constrain(v1, two, None), - Instruction::Constrain(v1, two, Some("With message".to_string())), + Instruction::Constrain(v1, two, Some("With message".to_string().into())), Instruction::Binary(Binary { lhs: v1, rhs: one, operator: BinaryOp::Add }), Instruction::Constrain(v2, three, None), ]; diff --git a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs index addaee3ba8d..06ae4bf5202 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs @@ -22,6 +22,7 @@ //! different blocks are merged, i.e. after the [`flatten_cfg`][super::flatten_cfg] pass. 
use std::collections::HashSet; +use acvm::FieldElement; use iter_extended::vecmap; use crate::ssa::{ @@ -30,7 +31,8 @@ use crate::ssa::{ dfg::{DataFlowGraph, InsertInstructionResult}, function::Function, instruction::{Instruction, InstructionId}, - value::ValueId, + types::Type, + value::{Value, ValueId}, }, ssa_gen::Ssa, }; @@ -43,7 +45,20 @@ impl Ssa { #[tracing::instrument(level = "trace", skip(self))] pub(crate) fn fold_constants(mut self) -> Ssa { for function in self.functions.values_mut() { - constant_fold(function); + constant_fold(function, false); + } + self + } + + /// Performs constant folding on each instruction. + /// + /// Also uses constraint information to inform more optimizations. + /// + /// See [`constant_folding`][self] module for more information. + #[tracing::instrument(level = "trace", skip(self))] + pub(crate) fn fold_constants_using_constraints(mut self) -> Ssa { + for function in self.functions.values_mut() { + constant_fold(function, true); } self } @@ -51,8 +66,8 @@ impl Ssa { /// The structure of this pass is simple: /// Go through each block and re-insert all instructions. -fn constant_fold(function: &mut Function) { - let mut context = Context::default(); +fn constant_fold(function: &mut Function, use_constraint_info: bool) { + let mut context = Context { use_constraint_info, ..Default::default() }; context.block_queue.push(function.entry_block()); while let Some(block) = context.block_queue.pop() { @@ -67,6 +82,7 @@ fn constant_fold(function: &mut Function) { #[derive(Default)] struct Context { + use_constraint_info: bool, /// Maps pre-folded ValueIds to the new ValueIds obtained by re-inserting the instruction. visited_blocks: HashSet, block_queue: Vec, @@ -79,24 +95,43 @@ impl Context { // Cache of instructions without any side-effects along with their outputs. let mut cached_instruction_results: HashMap> = HashMap::default(); + // Contains sets of values which are constrained to be equivalent to each other. + // + // The mapping's structure is `side_effects_enabled_var => (constrained_value => simplified_value)`. + // + // We partition the maps of constrained values according to the side-effects flag at the point + // at which the values are constrained. This prevents constraints which are only sometimes enforced + // being used to modify the rest of the program. + let mut constraint_simplification_mappings: HashMap> = + HashMap::default(); + let mut side_effects_enabled_var = + function.dfg.make_constant(FieldElement::one(), Type::bool()); + for instruction_id in instructions { - Self::fold_constants_into_instruction( + self.fold_constants_into_instruction( &mut function.dfg, block, instruction_id, &mut cached_instruction_results, + &mut constraint_simplification_mappings, + &mut side_effects_enabled_var, ); } self.block_queue.extend(function.dfg[block].successors()); } fn fold_constants_into_instruction( + &self, dfg: &mut DataFlowGraph, block: BasicBlockId, id: InstructionId, instruction_result_cache: &mut HashMap>, + constraint_simplification_mappings: &mut HashMap>, + side_effects_enabled_var: &mut ValueId, ) { - let instruction = Self::resolve_instruction(id, dfg); + let constraint_simplification_mapping = + constraint_simplification_mappings.entry(*side_effects_enabled_var).or_default(); + let instruction = Self::resolve_instruction(id, dfg, constraint_simplification_mapping); let old_results = dfg.instruction_results(id).to_vec(); // If a copy of this instruction exists earlier in the block, then reuse the previous results. 
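The new cache above is keyed first by the current side-effects condition and only then by the constrained value, so a constraint that is only active under one `if` condition cannot be used to rewrite code running under a different condition. A minimal sketch of that shape, with string keys standing in for `ValueId`s:

```rust
use std::collections::HashMap;

fn main() {
    // side_effects_enabled_var => (constrained value => simpler value)
    let mut mappings: HashMap<&str, HashMap<&str, &str>> = HashMap::new();

    // Under condition `c1`, `v7` was constrained to equal the constant `one`.
    mappings.entry("c1").or_default().insert("v7", "one");
    // Under a different condition `c2`, no such fact is known.
    mappings.entry("c2").or_default();

    // Only the partition for the *current* condition is consulted.
    assert_eq!(mappings["c1"].get("v7"), Some(&"one"));
    assert_eq!(mappings["c2"].get("v7"), None);
}
```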
@@ -110,15 +145,49 @@ impl Context { Self::replace_result_ids(dfg, &old_results, &new_results); - Self::cache_instruction(instruction, new_results, dfg, instruction_result_cache); + self.cache_instruction( + instruction.clone(), + new_results, + dfg, + instruction_result_cache, + constraint_simplification_mapping, + ); + + // If we just inserted an `Instruction::EnableSideEffects`, we need to update `side_effects_enabled_var` + // so that we use the correct set of constrained values in future. + if let Instruction::EnableSideEffects { condition } = instruction { + *side_effects_enabled_var = condition; + }; } /// Fetches an [`Instruction`] by its [`InstructionId`] and fully resolves its inputs. - fn resolve_instruction(instruction_id: InstructionId, dfg: &DataFlowGraph) -> Instruction { + fn resolve_instruction( + instruction_id: InstructionId, + dfg: &DataFlowGraph, + constraint_simplification_mapping: &HashMap, + ) -> Instruction { let instruction = dfg[instruction_id].clone(); + // Alternate between resolving `value_id` in the `dfg` and checking to see if the resolved value + // has been constrained to be equal to some simpler value in the current block. + // + // This allows us to reach a stable final `ValueId` for each instruction input as we add more + // constraints to the cache. + fn resolve_cache( + dfg: &DataFlowGraph, + cache: &HashMap, + value_id: ValueId, + ) -> ValueId { + let resolved_id = dfg.resolve(value_id); + match cache.get(&resolved_id) { + Some(cached_value) => resolve_cache(dfg, cache, *cached_value), + None => resolved_id, + } + } + // Resolve any inputs to ensure that we're comparing like-for-like instructions. - instruction.map_values(|value_id| dfg.resolve(value_id)) + instruction + .map_values(|value_id| resolve_cache(dfg, constraint_simplification_mapping, value_id)) } /// Pushes a new [`Instruction`] into the [`DataFlowGraph`] which applies any optimizations @@ -152,11 +221,42 @@ impl Context { } fn cache_instruction( + &self, instruction: Instruction, instruction_results: Vec, dfg: &DataFlowGraph, instruction_result_cache: &mut HashMap>, + constraint_simplification_mapping: &mut HashMap, ) { + if self.use_constraint_info { + // If the instruction was a constraint, then create a link between the two `ValueId`s + // to map from the more complex to the simpler value. + if let Instruction::Constrain(lhs, rhs, _) = instruction { + // These `ValueId`s should be fully resolved now. + match (&dfg[lhs], &dfg[rhs]) { + // Ignore trivial constraints + (Value::NumericConstant { .. }, Value::NumericConstant { .. }) => (), + + // Prefer replacing with constants where possible. + (Value::NumericConstant { .. }, _) => { + constraint_simplification_mapping.insert(rhs, lhs); + } + (_, Value::NumericConstant { .. }) => { + constraint_simplification_mapping.insert(lhs, rhs); + } + // Otherwise prefer block parameters over instruction results. + // This is as block parameters are more likely to be a single witness rather than a full expression. + (Value::Param { .. }, Value::Instruction { .. }) => { + constraint_simplification_mapping.insert(rhs, lhs); + } + (Value::Instruction { .. }, Value::Param { .. }) => { + constraint_simplification_mapping.insert(lhs, rhs); + } + (_, _) => (), + } + } + } + // If the instruction doesn't have side-effects, cache the results so we can reuse them if // the same instruction appears again later in the block. 
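`resolve_cache` above chases the constraint-simplification map transitively, so a value constrained equal to another value that was itself constrained to a constant resolves all the way to the constant. A simplified standalone version of that lookup (without the interleaved `dfg.resolve` step):

```rust
use std::collections::HashMap;

// Follow the simplification map until no further replacement applies.
fn resolve<'a>(cache: &HashMap<&'a str, &'a str>, value: &'a str) -> &'a str {
    match cache.get(value) {
        Some(&simpler) => resolve(cache, simpler),
        None => value,
    }
}

fn main() {
    let mut cache = HashMap::new();
    // `constrain v3 == v1` recorded v3 -> v1 (block parameters preferred over instruction results),
    // `constrain v1 == Field 5` recorded v1 -> five (constants preferred over everything else).
    cache.insert("v3", "v1");
    cache.insert("v1", "five");

    assert_eq!(resolve(&cache, "v3"), "five");
    assert_eq!(resolve(&cache, "v9"), "v9"); // unconstrained values are untouched
}
```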
if instruction.is_pure(dfg) { @@ -336,9 +436,9 @@ mod test { // // fn main f0 { // b0(v0: u16, Field 255: Field): - // v5 = div v0, Field 255 - // v6 = truncate v5 to 8 bits, max_bit_size: 16 - // return v6 + // v6 = div v0, Field 255 + // v7 = truncate v6 to 8 bits, max_bit_size: 16 + // return v7 // } main.dfg.set_value_from_id(v1, constant); @@ -354,7 +454,7 @@ mod test { ); assert_eq!( &main.dfg[instructions[1]], - &Instruction::Truncate { value: ValueId::test_new(5), bit_size: 8, max_bit_size: 16 } + &Instruction::Truncate { value: ValueId::test_new(6), bit_size: 8, max_bit_size: 16 } ); } diff --git a/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs b/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs index b7f154397a6..1f09a132132 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs @@ -14,7 +14,7 @@ use crate::ssa::{ ir::{ basic_block::BasicBlockId, function::{Function, FunctionId, RuntimeType, Signature}, - instruction::{BinaryOp, Instruction}, + instruction::{BinaryOp, ConstrainError, Instruction}, types::{NumericType, Type}, value::{Value, ValueId}, }, @@ -86,9 +86,22 @@ impl DefunctionalizationContext { let instruction = func.dfg[instruction_id].clone(); let mut replacement_instruction = None; // Operate on call instructions - let (target_func_id, mut arguments) = match instruction { + let (target_func_id, arguments) = match &instruction { Instruction::Call { func: target_func_id, arguments } => { - (target_func_id, arguments) + (*target_func_id, arguments) + } + // Constrain instruction potentially hold a call instruction themselves + // thus we need to account for them. + Instruction::Constrain(_, _, Some(constrain_error)) => { + if let ConstrainError::Dynamic(Instruction::Call { + func: target_func_id, + arguments, + }) = constrain_error.as_ref() + { + (*target_func_id, arguments) + } else { + continue; + } } _ => continue, }; @@ -96,6 +109,7 @@ impl DefunctionalizationContext { match func.dfg[target_func_id] { // If the target is a function used as value Value::Param { .. } | Value::Instruction { .. } => { + let mut arguments = arguments.clone(); let results = func.dfg.instruction_results(instruction_id); let signature = Signature { params: vecmap(&arguments, |param| func.dfg.type_of_value(*param)), @@ -120,7 +134,20 @@ impl DefunctionalizationContext { } _ => {} } - if let Some(new_instruction) = replacement_instruction { + if let Some(mut new_instruction) = replacement_instruction { + if let Instruction::Constrain(lhs, rhs, constrain_error_call) = instruction { + let new_error_call = if let Some(error) = constrain_error_call { + match error.as_ref() { + ConstrainError::Dynamic(_) => { + Some(Box::new(ConstrainError::Dynamic(new_instruction))) + } + _ => None, + } + } else { + None + }; + new_instruction = Instruction::Constrain(lhs, rhs, new_error_call); + } func.dfg[instruction_id] = new_instruction; } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index 1059994b9be..943a57c1bc0 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -152,8 +152,10 @@ use crate::ssa::{ }; mod branch_analysis; +mod capacity_tracker; pub(crate) mod value_merger; +use capacity_tracker::SliceCapacityTracker; use value_merger::ValueMerger; impl Ssa { @@ -184,17 +186,6 @@ struct Context<'f> { /// between inlining of branches. 
store_values: HashMap, - /// Maps an address to the old and new value of the element at that address - /// The difference between this map and store_values is that this stores - /// the old and new value of an element from the outer block whose jmpif - /// terminator is being flattened. - /// - /// This map persists throughout the flattening process, where addresses - /// are overwritten as new stores are found. This overwriting is the desired behavior, - /// as we want the most update to date value to be stored at a given address as - /// we walk through blocks to flatten. - outer_block_stores: HashMap, - /// Stores all allocations local to the current branch. /// Since these branches are local to the current branch (ie. only defined within one branch of /// an if expression), they should not be merged with their previous value or stored value in @@ -209,6 +200,10 @@ struct Context<'f> { /// condition. If we are under multiple conditions (a nested if), the topmost condition is /// the most recent condition combined with all previous conditions via `And` instructions. conditions: Vec<(BasicBlockId, ValueId)>, + + /// Maps SSA array values with a slice type to their size. + /// This is maintained by appropriate calls to the `SliceCapacityTracker` and is used by the `ValueMerger`. + slice_sizes: HashMap, } pub(crate) struct Store { @@ -239,7 +234,7 @@ fn flatten_function_cfg(function: &mut Function) { local_allocations: HashSet::new(), branch_ends, conditions: Vec::new(), - outer_block_stores: HashMap::default(), + slice_sizes: HashMap::default(), }; context.flatten(); } @@ -262,21 +257,18 @@ impl<'f> Context<'f> { /// Returns the last block to be inlined. This is either the return block of the function or, /// if self.conditions is not empty, the end block of the most recent condition. fn handle_terminator(&mut self, block: BasicBlockId) -> BasicBlockId { - if let TerminatorInstruction::JmpIf { .. } = - self.inserter.function.dfg[block].unwrap_terminator() - { - // Find stores in the outer block and insert into the `outer_block_stores` map. - // Not using this map can lead to issues when attempting to merge slices. - // When inlining a branch end, only the then branch and the else branch are checked for stores. - // However, there are cases where we want to load a value that comes from the outer block - // that we are handling the terminator for here. 
- let instructions = self.inserter.function.dfg[block].instructions().to_vec(); - for instruction in instructions { - let (instruction, _) = self.inserter.map_instruction(instruction); - if let Instruction::Store { address, value } = instruction { - self.outer_block_stores.insert(address, value); - } - } + // As we recursively flatten inner blocks, we need to track the slice information + // for the outer block before we start recursively inlining + let outer_block_instructions = self.inserter.function.dfg[block].instructions(); + let mut capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + for instruction in outer_block_instructions { + let results = self.inserter.function.dfg.instruction_results(*instruction); + let instruction = &self.inserter.function.dfg[*instruction]; + capacity_tracker.collect_slice_information( + instruction, + &mut self.slice_sizes, + results.to_vec(), + ); } match self.inserter.function.dfg[block].unwrap_terminator() { @@ -494,12 +486,16 @@ impl<'f> Context<'f> { }); let block = self.inserter.function.entry_block(); - let mut value_merger = ValueMerger::new( - &mut self.inserter.function.dfg, - block, - Some(&self.store_values), - Some(&self.outer_block_stores), - ); + + // Make sure we have tracked the slice capacities of any block arguments + let capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + for (then_arg, else_arg) in args.iter() { + capacity_tracker.compute_slice_capacity(*then_arg, &mut self.slice_sizes); + capacity_tracker.compute_slice_capacity(*else_arg, &mut self.slice_sizes); + } + + let mut value_merger = + ValueMerger::new(&mut self.inserter.function.dfg, block, &mut self.slice_sizes); // Cannot include this in the previous vecmap since it requires exclusive access to self let args = vecmap(args, |(then_arg, else_arg)| { @@ -538,18 +534,22 @@ impl<'f> Context<'f> { } } + // Most slice information is collected when instructions are inlined. + // We need to collect information on slice values here as we may possibly merge stores + // before any inlining occurs. 
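For context on what `ValueMerger::merge_values` produces once both branches of a `jmpif` have been inlined: numeric values are typically combined with an arithmetic select over the two branch conditions. The snippet below only illustrates that general idea; it is not taken from `ValueMerger` itself:

```rust
// then_condition and else_condition are boolean flags (0 or 1) that are
// mutually exclusive once the CFG has been flattened.
fn merge_values(then_condition: u64, else_condition: u64, then_value: u64, else_value: u64) -> u64 {
    then_condition * then_value + else_condition * else_value
}

fn main() {
    // if c { 7 } else { 3 }
    assert_eq!(merge_values(1, 0, 7, 3), 7);
    assert_eq!(merge_values(0, 1, 7, 3), 3);
}
```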
+ let capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + for (then_case, else_case, _) in new_map.values() { + capacity_tracker.compute_slice_capacity(*then_case, &mut self.slice_sizes); + capacity_tracker.compute_slice_capacity(*else_case, &mut self.slice_sizes); + } + let then_condition = then_branch.condition; let else_condition = else_branch.condition; let block = self.inserter.function.entry_block(); - let mut value_merger = ValueMerger::new( - &mut self.inserter.function.dfg, - block, - Some(&self.store_values), - Some(&self.outer_block_stores), - ); - + let mut value_merger = + ValueMerger::new(&mut self.inserter.function.dfg, block, &mut self.slice_sizes); // Merging must occur in a separate loop as we cannot borrow `self` as mutable while `value_merger` does let mut new_values = HashMap::default(); for (address, (then_case, else_case, _)) in &new_map { @@ -571,6 +571,16 @@ impl<'f> Context<'f> { .insert(address, Store { old_value: *old_value, new_value: value }); } } + + // Collect any potential slice information on the stores we are merging + for (address, (_, _, _)) in &new_map { + let value = new_values[address]; + let address = *address; + let instruction = Instruction::Store { address, value }; + + let mut capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + capacity_tracker.collect_slice_information(&instruction, &mut self.slice_sizes, vec![]); + } } fn remember_store(&mut self, address: ValueId, new_value: ValueId) { @@ -579,8 +589,18 @@ impl<'f> Context<'f> { store_value.new_value = new_value; } else { let load = Instruction::Load { address }; + let load_type = Some(vec![self.inserter.function.dfg.type_of_value(new_value)]); - let old_value = self.insert_instruction_with_typevars(load, load_type).first(); + let old_value = + self.insert_instruction_with_typevars(load.clone(), load_type).first(); + + // Need this or else we will be missing a the previous value of a slice that we wish to merge + let mut capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + capacity_tracker.collect_slice_information( + &load, + &mut self.slice_sizes, + vec![old_value], + ); self.store_values.insert(address, Store { old_value, new_value }); } @@ -602,8 +622,15 @@ impl<'f> Context<'f> { // unnecessary, when removing it actually causes an aliasing/mutability error. let instructions = self.inserter.function.dfg[destination].instructions().to_vec(); - for instruction in instructions { - self.push_instruction(instruction); + for instruction in instructions.iter() { + let results = self.push_instruction(*instruction); + let (instruction, _) = self.inserter.map_instruction(*instruction); + let mut capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + capacity_tracker.collect_slice_information( + &instruction, + &mut self.slice_sizes, + results, + ); } self.handle_terminator(destination) @@ -615,7 +642,7 @@ impl<'f> Context<'f> { /// As a result, the instruction that will be pushed will actually be a new instruction /// with a different InstructionId from the original. The results of the given instruction /// will also be mapped to the results of the new instruction. 
- fn push_instruction(&mut self, id: InstructionId) { + fn push_instruction(&mut self, id: InstructionId) -> Vec { let (instruction, call_stack) = self.inserter.map_instruction(id); let instruction = self.handle_instruction_side_effects(instruction, call_stack.clone()); let is_allocate = matches!(instruction, Instruction::Allocate); @@ -628,6 +655,8 @@ impl<'f> Context<'f> { if is_allocate { self.local_allocations.insert(results.first()); } + + results.results().into_owned() } /// If we are currently in a branch, we need to modify constrain instructions diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs new file mode 100644 index 00000000000..7cd0fe3084e --- /dev/null +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs @@ -0,0 +1,150 @@ +use crate::ssa::ir::{ + dfg::DataFlowGraph, + instruction::{Instruction, Intrinsic}, + types::Type, + value::{Value, ValueId}, +}; + +use fxhash::FxHashMap as HashMap; + +pub(crate) struct SliceCapacityTracker<'a> { + dfg: &'a DataFlowGraph, +} + +impl<'a> SliceCapacityTracker<'a> { + pub(crate) fn new(dfg: &'a DataFlowGraph) -> Self { + SliceCapacityTracker { dfg } + } + + /// Determine how the slice sizes map needs to be updated according to the provided instruction. + pub(crate) fn collect_slice_information( + &mut self, + instruction: &Instruction, + slice_sizes: &mut HashMap, + results: Vec, + ) { + match instruction { + Instruction::ArrayGet { array, .. } => { + let array_typ = self.dfg.type_of_value(*array); + let array_value = &self.dfg[*array]; + if matches!(array_value, Value::Array { .. }) && array_typ.contains_slice_element() + { + // Initial insertion into the slice sizes map + // Any other insertions should only occur if the value is already + // a part of the map. + self.compute_slice_capacity(*array, slice_sizes); + } + } + Instruction::ArraySet { array, value, .. } => { + let array_typ = self.dfg.type_of_value(*array); + let array_value = &self.dfg[*array]; + if matches!(array_value, Value::Array { .. }) && array_typ.contains_slice_element() + { + // Initial insertion into the slice sizes map + // Any other insertions should only occur if the value is already + // a part of the map. + self.compute_slice_capacity(*array, slice_sizes); + } + + let value_typ = self.dfg.type_of_value(*value); + // Compiler sanity check + assert!(!value_typ.contains_slice_element(), "ICE: Nested slices are not allowed and should not have reached the flattening pass of SSA"); + + if let Some(capacity) = slice_sizes.get(array) { + slice_sizes.insert(results[0], *capacity); + } + } + Instruction::Call { func, arguments } => { + let func = &self.dfg[*func]; + if let Value::Intrinsic(intrinsic) = func { + let (argument_index, result_index) = match intrinsic { + Intrinsic::SlicePushBack + | Intrinsic::SlicePushFront + | Intrinsic::SlicePopBack + | Intrinsic::SliceInsert + | Intrinsic::SliceRemove => (1, 1), + // `pop_front` returns the popped element, and then the respective slice. + // This means in the case of a slice with structs, the result index of the popped slice + // will change depending on the number of elements in the struct. + // For example, a slice with four elements will look as such in SSA: + // v3, v4, v5, v6, v7, v8 = call slice_pop_front(v1, v2) + // where v7 is the slice length and v8 is the popped slice itself. 
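The `(argument_index, result_index)` pairs above encode where the slice sits in a call's arguments and results. For `pop_front` the popped element's fields come first in the result list, so the returned slice is always the final result however many fields the element type has, which is why `results.len() - 1` is used. A small check of that indexing, reusing the SSA example from the comment:

```rust
fn main() {
    // v3, v4, v5, v6, v7, v8 = call slice_pop_front(v1, v2)
    // v3..v6 are the popped element's four fields, v7 the new length, v8 the new slice.
    let results = ["v3", "v4", "v5", "v6", "v7", "v8"];
    assert_eq!(results[results.len() - 1], "v8");

    // With a single-field element there are fewer results, but the slice is still last.
    let results = ["elem", "new_len", "new_slice"];
    assert_eq!(results[results.len() - 1], "new_slice");
}
```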
+ Intrinsic::SlicePopFront => (1, results.len() - 1), + _ => return, + }; + let slice_contents = arguments[argument_index]; + match intrinsic { + Intrinsic::SlicePushBack + | Intrinsic::SlicePushFront + | Intrinsic::SliceInsert => { + for arg in &arguments[(argument_index + 1)..] { + let element_typ = self.dfg.type_of_value(*arg); + if element_typ.contains_slice_element() { + self.compute_slice_capacity(*arg, slice_sizes); + } + } + if let Some(contents_capacity) = slice_sizes.get(&slice_contents) { + let new_capacity = *contents_capacity + 1; + slice_sizes.insert(results[result_index], new_capacity); + } + } + Intrinsic::SlicePopBack + | Intrinsic::SliceRemove + | Intrinsic::SlicePopFront => { + // We do not decrement the size on intrinsics that could remove values from a slice. + // This is because we could potentially go back to the smaller slice and not fill in dummies. + // This pass should be tracking the potential max that a slice ***could be*** + if let Some(contents_capacity) = slice_sizes.get(&slice_contents) { + let new_capacity = *contents_capacity - 1; + slice_sizes.insert(results[result_index], new_capacity); + } + } + _ => {} + } + } + } + Instruction::Store { address, value } => { + let value_typ = self.dfg.type_of_value(*value); + if value_typ.contains_slice_element() { + self.compute_slice_capacity(*value, slice_sizes); + + let value_capacity = slice_sizes.get(value).unwrap_or_else(|| { + panic!("ICE: should have slice capacity set for value {value} being stored at {address}") + }); + + slice_sizes.insert(*address, *value_capacity); + } + } + Instruction::Load { address } => { + let load_typ = self.dfg.type_of_value(*address); + if load_typ.contains_slice_element() { + let result = results[0]; + + let address_capacity = slice_sizes.get(address).unwrap_or_else(|| { + panic!("ICE: should have slice capacity set at address {address} being loaded into {result}") + }); + + slice_sizes.insert(result, *address_capacity); + } + } + _ => {} + } + } + + /// Computes the starting capacity of a slice which is still a `Value::Array` + pub(crate) fn compute_slice_capacity( + &self, + array_id: ValueId, + slice_sizes: &mut HashMap, + ) { + if let Value::Array { array, typ } = &self.dfg[array_id] { + // Compiler sanity check + assert!(!typ.is_nested_slice(), "ICE: Nested slices are not allowed and should not have reached the flattening pass of SSA"); + if let Type::Slice(_) = typ { + let element_size = typ.element_size(); + let len = array.len() / element_size; + slice_sizes.insert(array_id, len); + } + } + } +} diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs index 446560f45f1..6b923a2e42d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs @@ -4,35 +4,26 @@ use fxhash::FxHashMap as HashMap; use crate::ssa::ir::{ basic_block::BasicBlockId, dfg::{CallStack, DataFlowGraph}, - instruction::{BinaryOp, Instruction, Intrinsic}, + instruction::{BinaryOp, Instruction}, types::Type, - value::{Value, ValueId}, + value::ValueId, }; -use crate::ssa::opt::flatten_cfg::Store; - pub(crate) struct ValueMerger<'a> { dfg: &'a mut DataFlowGraph, block: BasicBlockId, - store_values: Option<&'a HashMap>, - outer_block_stores: Option<&'a HashMap>, - slice_sizes: HashMap, + // Maps SSA array values with a slice type to their size. + // This must be computed before merging values. 
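`compute_slice_capacity` above derives a starting capacity from a constant slice's flattened representation: the number of stored values divided by the element size of the slice type. A worked example of that division (a hypothetical helper, not the compiler's types):

```rust
// A slice of structs with `element_size` numeric fields is stored flattened,
// so its logical capacity is the flat length divided by the element size.
fn slice_capacity(flat_len: usize, element_size: usize) -> usize {
    flat_len / element_size
}

fn main() {
    // [(1, 2), (3, 4), (5, 6)] stored as 6 flat values with element_size 2.
    assert_eq!(slice_capacity(6, 2), 3);
    // A slice of plain Fields: element_size 1, capacity equals the flat length.
    assert_eq!(slice_capacity(4, 1), 4);
}
```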
+ slice_sizes: &'a mut HashMap, } impl<'a> ValueMerger<'a> { pub(crate) fn new( dfg: &'a mut DataFlowGraph, block: BasicBlockId, - store_values: Option<&'a HashMap>, - outer_block_stores: Option<&'a HashMap>, + slice_sizes: &'a mut HashMap, ) -> Self { - ValueMerger { - dfg, - block, - store_values, - outer_block_stores, - slice_sizes: HashMap::default(), - } + ValueMerger { dfg, block, slice_sizes } } /// Merge two values a and b from separate basic blocks to a single value. @@ -184,11 +175,13 @@ impl<'a> ValueMerger<'a> { _ => panic!("Expected slice type"), }; - let then_len = self.get_slice_length(then_value_id); - self.slice_sizes.insert(then_value_id, then_len); + let then_len = *self.slice_sizes.get(&then_value_id).unwrap_or_else(|| { + panic!("ICE: Merging values during flattening encountered slice {then_value_id} without a preset size"); + }); - let else_len = self.get_slice_length(else_value_id); - self.slice_sizes.insert(else_value_id, else_len); + let else_len = *self.slice_sizes.get(&else_value_id).unwrap_or_else(|| { + panic!("ICE: Merging values during flattening encountered slice {else_value_id} without a preset size"); + }); let len = then_len.max(else_len); @@ -218,8 +211,10 @@ impl<'a> ValueMerger<'a> { } }; - let then_element = get_element(then_value_id, typevars.clone(), then_len); - let else_element = get_element(else_value_id, typevars, else_len); + let then_element = + get_element(then_value_id, typevars.clone(), then_len * element_types.len()); + let else_element = + get_element(else_value_id, typevars, else_len * element_types.len()); merged.push_back(self.merge_values( then_condition, @@ -233,82 +228,6 @@ impl<'a> ValueMerger<'a> { self.dfg.make_array(merged, typ) } - fn get_slice_length(&mut self, value_id: ValueId) -> usize { - let value = &self.dfg[value_id]; - match value { - Value::Array { array, .. } => array.len(), - Value::Instruction { instruction: instruction_id, .. } => { - let instruction = &self.dfg[*instruction_id]; - match instruction { - // TODO(#3188): A slice can be the result of an ArrayGet when it is the - // fetched from a slice of slices or as a struct field. - // However, we need to incorporate nested slice support in flattening - // in order for this to be valid - // Instruction::ArrayGet { array, .. } => {} - Instruction::ArraySet { array, .. 
} => { - let array = *array; - let len = self.get_slice_length(array); - self.slice_sizes.insert(array, len); - len - } - Instruction::Load { address } => { - let outer_block_stores = self.outer_block_stores.expect("ICE: A map of previous stores is required in order to resolve a slice load"); - let store_values = self.store_values.expect("ICE: A map of previous stores is required in order to resolve a slice load"); - let store_value = outer_block_stores - .get(address) - .expect("ICE: load in merger should have store from outer block"); - - if let Some(len) = self.slice_sizes.get(store_value) { - return *len; - } - - let store_value = if let Some(store) = store_values.get(address) { - if let Some(len) = self.slice_sizes.get(&store.new_value) { - return *len; - } - - store.new_value - } else { - *store_value - }; - - self.get_slice_length(store_value) - } - Instruction::Call { func, arguments } => { - let slice_contents = arguments[1]; - let func = &self.dfg[*func]; - match func { - Value::Intrinsic(intrinsic) => match intrinsic { - Intrinsic::SlicePushBack - | Intrinsic::SlicePushFront - | Intrinsic::SliceInsert => { - // `get_slice_length` needs to be called here as it is borrows self as mutable - let initial_len = self.get_slice_length(slice_contents); - self.slice_sizes.insert(slice_contents, initial_len); - initial_len + 1 - } - Intrinsic::SlicePopBack - | Intrinsic::SlicePopFront - | Intrinsic::SliceRemove => { - // `get_slice_length` needs to be called here as it is borrows self as mutable - let initial_len = self.get_slice_length(slice_contents); - self.slice_sizes.insert(slice_contents, initial_len); - initial_len - 1 - } - _ => { - unreachable!("ICE: Intrinsic not supported, got {intrinsic:?}") - } - }, - _ => unreachable!("ICE: Expected intrinsic value but got {func:?}"), - } - } - _ => unreachable!("ICE: Got unexpected instruction: {instruction:?}"), - } - } - _ => unreachable!("ICE: Got unexpected value when resolving slice length {value:?}"), - } - } - /// Construct a dummy value to be attached to the smaller of two slices being merged. /// We need to make sure we follow the internal element type structure of the slice type /// even for dummy data to ensure that we do not have errors later in the compiler, diff --git a/compiler/noirc_evaluator/src/ssa/opt/mod.rs b/compiler/noirc_evaluator/src/ssa/opt/mod.rs index 71725422a7a..a315695f7db 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mod.rs @@ -12,5 +12,6 @@ mod die; pub(crate) mod flatten_cfg; mod inlining; mod mem2reg; +mod remove_bit_shifts; mod simplify_cfg; mod unrolling; diff --git a/compiler/noirc_evaluator/src/ssa/opt/remove_bit_shifts.rs b/compiler/noirc_evaluator/src/ssa/opt/remove_bit_shifts.rs new file mode 100644 index 00000000000..a71a42d5757 --- /dev/null +++ b/compiler/noirc_evaluator/src/ssa/opt/remove_bit_shifts.rs @@ -0,0 +1,285 @@ +use std::{borrow::Cow, rc::Rc}; + +use acvm::FieldElement; + +use crate::ssa::{ + ir::{ + basic_block::BasicBlockId, + dfg::{CallStack, InsertInstructionResult}, + function::{Function, RuntimeType}, + instruction::{Binary, BinaryOp, Endian, Instruction, InstructionId, Intrinsic}, + types::{NumericType, Type}, + value::ValueId, + }, + ssa_gen::Ssa, +}; + +impl Ssa { + /// Performs constant folding on each instruction. + /// + /// See [`constant_folding`][self] module for more information. 
+ #[tracing::instrument(level = "trace", skip(self))] + pub(crate) fn remove_bit_shifts(mut self) -> Ssa { + remove_bit_shifts(self.main_mut()); + self + } +} + +/// The structure of this pass is simple: +/// Go through each block and re-insert all instructions. +fn remove_bit_shifts(function: &mut Function) { + if let RuntimeType::Brillig = function.runtime() { + return; + } + + let block = function.entry_block(); + let mut context = + Context { function, new_instructions: Vec::new(), block, call_stack: CallStack::default() }; + + context.remove_bit_shifts(); +} + +struct Context<'f> { + function: &'f mut Function, + new_instructions: Vec, + + block: BasicBlockId, + call_stack: CallStack, +} + +impl Context<'_> { + fn remove_bit_shifts(&mut self) { + let instructions = self.function.dfg[self.block].take_instructions(); + + for instruction_id in instructions { + match self.function.dfg[instruction_id] { + Instruction::Binary(Binary { lhs, rhs, operator }) + if matches!(operator, BinaryOp::Shl | BinaryOp::Shr) => + { + self.call_stack = self.function.dfg.get_call_stack(instruction_id).clone(); + let old_result = + *self.function.dfg.instruction_results(instruction_id).first().unwrap(); + + let bit_size = match self.function.dfg.type_of_value(lhs) { + Type::Numeric(NumericType::Signed { bit_size }) + | Type::Numeric(NumericType::Unsigned { bit_size }) => bit_size, + _ => unreachable!("ICE: right-shift attempted on non-integer"), + }; + let new_result = if operator == BinaryOp::Shl { + self.insert_wrapping_shift_left(lhs, rhs, bit_size) + } else { + self.insert_shift_right(lhs, rhs, bit_size) + }; + + self.function.dfg.set_value_from_id(old_result, new_result); + } + _ => { + self.new_instructions.push(instruction_id); + } + }; + } + + *self.function.dfg[self.block].instructions_mut() = + std::mem::take(&mut self.new_instructions); + } + + /// Insert ssa instructions which computes lhs << rhs by doing lhs*2^rhs + /// and truncate the result to bit_size + pub(crate) fn insert_wrapping_shift_left( + &mut self, + lhs: ValueId, + rhs: ValueId, + bit_size: u32, + ) -> ValueId { + let base = self.field_constant(FieldElement::from(2_u128)); + let typ = self.function.dfg.type_of_value(lhs); + let (max_bit, pow) = if let Some(rhs_constant) = self.function.dfg.get_numeric_constant(rhs) + { + // Happy case is that we know precisely by how many bits the the integer will + // increase: lhs_bit_size + rhs + let bit_shift_size = rhs_constant.to_u128() as u32; + + let (rhs_bit_size_pow_2, overflows) = 2_u128.overflowing_pow(bit_shift_size); + if overflows { + assert!(bit_size < 128, "ICE - shift left with big integers are not supported"); + if bit_size < 128 { + let zero = self.numeric_constant(FieldElement::zero(), typ); + return InsertInstructionResult::SimplifiedTo(zero).first(); + } + } + let pow = self.numeric_constant(FieldElement::from(rhs_bit_size_pow_2), typ); + + let max_lhs_bits = self.function.dfg.get_value_max_num_bits(lhs); + + (max_lhs_bits + bit_shift_size, pow) + } else { + // we use a predicate to nullify the result in case of overflow + let bit_size_var = + self.numeric_constant(FieldElement::from(bit_size as u128), typ.clone()); + let overflow = self.insert_binary(rhs, BinaryOp::Lt, bit_size_var); + let predicate = self.insert_cast(overflow, typ.clone()); + // we can safely cast to unsigned because overflow_checks prevent bit-shift with a negative value + let rhs_unsigned = self.insert_cast(rhs, Type::unsigned(bit_size)); + let pow = self.pow(base, rhs_unsigned); + let pow = 
self.insert_cast(pow, typ); + (FieldElement::max_num_bits(), self.insert_binary(predicate, BinaryOp::Mul, pow)) + }; + + if max_bit <= bit_size { + self.insert_binary(lhs, BinaryOp::Mul, pow) + } else { + let result = self.insert_binary(lhs, BinaryOp::Mul, pow); + self.insert_truncate(result, bit_size, max_bit) + } + } + + /// Insert ssa instructions which computes lhs >> rhs by doing lhs/2^rhs + pub(crate) fn insert_shift_right( + &mut self, + lhs: ValueId, + rhs: ValueId, + bit_size: u32, + ) -> ValueId { + let lhs_typ = self.function.dfg.type_of_value(lhs); + let base = self.field_constant(FieldElement::from(2_u128)); + // we can safely cast to unsigned because overflow_checks prevent bit-shift with a negative value + let rhs_unsigned = self.insert_cast(rhs, Type::unsigned(bit_size)); + let pow = self.pow(base, rhs_unsigned); + // We need at least one more bit for the case where rhs == bit_size + let div_type = Type::unsigned(bit_size + 1); + let casted_lhs = self.insert_cast(lhs, div_type.clone()); + let casted_pow = self.insert_cast(pow, div_type); + let div_result = self.insert_binary(casted_lhs, BinaryOp::Div, casted_pow); + // We have to cast back to the original type + self.insert_cast(div_result, lhs_typ) + } + + /// Computes lhs^rhs via square&multiply, using the bits decomposition of rhs + /// Pseudo-code of the computation: + /// let mut r = 1; + /// let rhs_bits = to_bits(rhs); + /// for i in 1 .. bit_size + 1 { + /// let r_squared = r * r; + /// let b = rhs_bits[bit_size - i]; + /// r = (r_squared * lhs * b) + (1 - b) * r_squared; + /// } + fn pow(&mut self, lhs: ValueId, rhs: ValueId) -> ValueId { + let typ = self.function.dfg.type_of_value(rhs); + if let Type::Numeric(NumericType::Unsigned { bit_size }) = typ { + let to_bits = self.function.dfg.import_intrinsic(Intrinsic::ToBits(Endian::Little)); + let length = self.field_constant(FieldElement::from(bit_size as i128)); + let result_types = + vec![Type::field(), Type::Array(Rc::new(vec![Type::bool()]), bit_size as usize)]; + let rhs_bits = self.insert_call(to_bits, vec![rhs, length], result_types); + + let rhs_bits = rhs_bits[1]; + let one = self.field_constant(FieldElement::one()); + let mut r = one; + for i in 1..bit_size + 1 { + let r_squared = self.insert_binary(r, BinaryOp::Mul, r); + let a = self.insert_binary(r_squared, BinaryOp::Mul, lhs); + let idx = self.field_constant(FieldElement::from((bit_size - i) as i128)); + let b = self.insert_array_get(rhs_bits, idx, Type::bool()); + let not_b = self.insert_not(b); + let b = self.insert_cast(b, Type::field()); + let not_b = self.insert_cast(not_b, Type::field()); + let r1 = self.insert_binary(a, BinaryOp::Mul, b); + let r2 = self.insert_binary(r_squared, BinaryOp::Mul, not_b); + r = self.insert_binary(r1, BinaryOp::Add, r2); + } + r + } else { + unreachable!("Value must be unsigned in power operation"); + } + } + + pub(crate) fn field_constant(&mut self, constant: FieldElement) -> ValueId { + self.function.dfg.make_constant(constant, Type::field()) + } + + /// Insert a numeric constant into the current function + pub(crate) fn numeric_constant( + &mut self, + value: impl Into, + typ: Type, + ) -> ValueId { + self.function.dfg.make_constant(value.into(), typ) + } + + /// Insert a binary instruction at the end of the current block. + /// Returns the result of the binary instruction. 
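For reference, a minimal, self-contained Rust sketch of the arithmetic that `insert_wrapping_shift_left`, `insert_shift_right` and `pow` above encode as SSA instructions. All names below are hypothetical, the shift amount is assumed to stay below the operand's bit size (so the overflow-nullifying predicate is omitted), and values are kept small enough not to overflow `u128`:

    // lhs << rhs == (lhs * 2^rhs) truncated back to the operand's bit size.
    fn wrapping_shl_u8(lhs: u8, rhs: u32) -> u8 {
        ((lhs as u128 * (1u128 << rhs)) % 256) as u8
    }

    // lhs >> rhs == lhs / 2^rhs (integer division).
    fn shr_u8(lhs: u8, rhs: u32) -> u8 {
        (lhs as u128 / (1u128 << rhs)) as u8
    }

    // 2^exp via square-and-multiply over the bits of `exp`, most significant
    // bit first, mirroring the pseudo-code documented on `pow`.
    fn pow2(exp: u32, bit_size: u32) -> u128 {
        let mut r: u128 = 1;
        for i in (0..bit_size).rev() {
            r = r * r;
            if (exp >> i) & 1 == 1 {
                r *= 2; // the base is 2 for bit shifts
            }
        }
        r
    }

    fn main() {
        assert_eq!(wrapping_shl_u8(200, 1), 200u8.wrapping_shl(1)); // 144 = 400 mod 256
        assert_eq!(shr_u8(200, 3), 200u8 >> 3);                     // 25
        assert_eq!(pow2(5, 8), 32);
    }

The square-and-multiply loop walks the exponent from its most significant bit down, exactly as the pseudo-code on `pow` describes.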
+ pub(crate) fn insert_binary( + &mut self, + lhs: ValueId, + operator: BinaryOp, + rhs: ValueId, + ) -> ValueId { + let instruction = Instruction::Binary(Binary { lhs, rhs, operator }); + self.insert_instruction(instruction, None).first() + } + + /// Insert a not instruction at the end of the current block. + /// Returns the result of the instruction. + pub(crate) fn insert_not(&mut self, rhs: ValueId) -> ValueId { + self.insert_instruction(Instruction::Not(rhs), None).first() + } + + /// Insert a truncate instruction at the end of the current block. + /// Returns the result of the truncate instruction. + pub(crate) fn insert_truncate( + &mut self, + value: ValueId, + bit_size: u32, + max_bit_size: u32, + ) -> ValueId { + self.insert_instruction(Instruction::Truncate { value, bit_size, max_bit_size }, None) + .first() + } + + /// Insert a cast instruction at the end of the current block. + /// Returns the result of the cast instruction. + pub(crate) fn insert_cast(&mut self, value: ValueId, typ: Type) -> ValueId { + self.insert_instruction(Instruction::Cast(value, typ), None).first() + } + + /// Insert a call instruction at the end of the current block and return + /// the results of the call. + pub(crate) fn insert_call( + &mut self, + func: ValueId, + arguments: Vec, + result_types: Vec, + ) -> Cow<[ValueId]> { + self.insert_instruction(Instruction::Call { func, arguments }, Some(result_types)).results() + } + + /// Insert an instruction to extract an element from an array + pub(crate) fn insert_array_get( + &mut self, + array: ValueId, + index: ValueId, + element_type: Type, + ) -> ValueId { + let element_type = Some(vec![element_type]); + self.insert_instruction(Instruction::ArrayGet { array, index }, element_type).first() + } + + pub(crate) fn insert_instruction( + &mut self, + instruction: Instruction, + ctrl_typevars: Option>, + ) -> InsertInstructionResult { + let result = self.function.dfg.insert_instruction_and_results( + instruction, + self.block, + ctrl_typevars, + self.call_stack.clone(), + ); + + if let InsertInstructionResult::Results(instruction_id, _) = result { + self.new_instructions.push(instruction_id); + } + + result + } +} diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs index 0e155776545..9c760c013a9 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs @@ -192,7 +192,7 @@ impl<'a> FunctionContext<'a> { ast::Type::Slice(elements) => { let element_types = Self::convert_type(elements).flatten(); Tree::Branch(vec![ - Tree::Leaf(f(Type::field())), + Tree::Leaf(f(Type::length_type())), Tree::Leaf(f(Type::Slice(Rc::new(element_types)))), ]) } @@ -219,8 +219,8 @@ impl<'a> FunctionContext<'a> { let element_types = Self::convert_type(element).flatten(); Type::Array(Rc::new(element_types), *len as usize) } - ast::Type::Integer(Signedness::Signed, bits) => Type::signed(*bits), - ast::Type::Integer(Signedness::Unsigned, bits) => Type::unsigned(*bits), + ast::Type::Integer(Signedness::Signed, bits) => Type::signed((*bits).into()), + ast::Type::Integer(Signedness::Unsigned, bits) => Type::unsigned((*bits).into()), ast::Type::Bool => Type::unsigned(1), ast::Type::String(len) => Type::Array(Rc::new(vec![Type::char()]), *len as usize), ast::Type::FmtString(_, _) => { @@ -435,7 +435,7 @@ impl<'a> FunctionContext<'a> { self.builder.set_location(location).insert_constrain( sign, one, - Some("attempt to bit-shift with overflow".to_string()), + 
Some("attempt to bit-shift with overflow".to_owned().into()), ); } @@ -446,7 +446,7 @@ impl<'a> FunctionContext<'a> { self.builder.set_location(location).insert_constrain( overflow, one, - Some("attempt to bit-shift with overflow".to_owned()), + Some("attempt to bit-shift with overflow".to_owned().into()), ); self.builder.insert_truncate(result, bit_size, bit_size + 1) } @@ -498,8 +498,11 @@ impl<'a> FunctionContext<'a> { let sign_diff = self.builder.insert_binary(result_sign, BinaryOp::Eq, lhs_sign); let sign_diff_with_predicate = self.builder.insert_binary(sign_diff, BinaryOp::Mul, same_sign); - let overflow_check = - Instruction::Constrain(sign_diff_with_predicate, same_sign, Some(message)); + let overflow_check = Instruction::Constrain( + sign_diff_with_predicate, + same_sign, + Some(message.into()), + ); self.builder.set_location(location).insert_instruction(overflow_check, None); } BinaryOpKind::Multiply => { @@ -509,11 +512,10 @@ impl<'a> FunctionContext<'a> { let rhs_abs = self.absolute_value_helper(rhs, rhs_sign, bit_size); let product_field = self.builder.insert_binary(lhs_abs, BinaryOp::Mul, rhs_abs); // It must not already overflow the bit_size - let message = "attempt to multiply with overflow".to_string(); self.builder.set_location(location).insert_range_check( product_field, bit_size, - Some(message.clone()), + Some("attempt to multiply with overflow".to_string()), ); let product = self.builder.insert_cast(product_field, Type::unsigned(bit_size)); @@ -530,7 +532,7 @@ impl<'a> FunctionContext<'a> { self.builder.set_location(location).insert_constrain( product_overflow_check, one, - Some(message), + Some(message.into()), ); } _ => unreachable!("operator {} should not overflow", operator), @@ -550,22 +552,6 @@ impl<'a> FunctionContext<'a> { ) -> Values { let result_type = self.builder.type_of_value(lhs); let mut result = match operator { - BinaryOpKind::ShiftLeft => { - let bit_size = match result_type { - Type::Numeric(NumericType::Signed { bit_size }) - | Type::Numeric(NumericType::Unsigned { bit_size }) => bit_size, - _ => unreachable!("ICE: left-shift attempted on non-integer"), - }; - self.builder.insert_wrapping_shift_left(lhs, rhs, bit_size) - } - BinaryOpKind::ShiftRight => { - let bit_size = match result_type { - Type::Numeric(NumericType::Signed { bit_size }) - | Type::Numeric(NumericType::Unsigned { bit_size }) => bit_size, - _ => unreachable!("ICE: right-shift attempted on non-integer"), - }; - self.builder.insert_shift_right(lhs, rhs, bit_size) - } BinaryOpKind::Equal | BinaryOpKind::NotEqual if matches!(result_type, Type::Array(..)) => { @@ -654,13 +640,13 @@ impl<'a> FunctionContext<'a> { let result_alloc = self.builder.set_location(location).insert_allocate(Type::bool()); let true_value = self.builder.numeric_constant(1u128, Type::bool()); self.builder.insert_store(result_alloc, true_value); - let zero = self.builder.field_constant(0u128); + let zero = self.builder.length_constant(0u128); self.builder.terminate_with_jmp(loop_start, vec![zero]); // loop_start self.builder.switch_to_block(loop_start); - let i = self.builder.add_block_parameter(loop_start, Type::field()); - let array_length = self.builder.field_constant(array_length as u128); + let i = self.builder.add_block_parameter(loop_start, Type::length_type()); + let array_length = self.builder.length_constant(array_length as u128); let v0 = self.builder.insert_binary(i, BinaryOp::Lt, array_length); self.builder.terminate_with_jmpif(v0, loop_body, loop_end); @@ -672,7 +658,7 @@ impl<'a> FunctionContext<'a> { 
let v4 = self.builder.insert_load(result_alloc, Type::bool()); let v5 = self.builder.insert_binary(v4, BinaryOp::And, v3); self.builder.insert_store(result_alloc, v5); - let one = self.builder.field_constant(1u128); + let one = self.builder.length_constant(1u128); let v6 = self.builder.insert_binary(i, BinaryOp::Add, one); self.builder.terminate_with_jmp(loop_start, vec![v6]); @@ -738,12 +724,17 @@ impl<'a> FunctionContext<'a> { /// Create a const offset of an address for an array load or store pub(super) fn make_offset(&mut self, mut address: ValueId, offset: u128) -> ValueId { if offset != 0 { - let offset = self.builder.field_constant(offset); + let offset = self.builder.numeric_constant(offset, self.builder.type_of_value(address)); address = self.builder.insert_binary(address, BinaryOp::Add, offset); } address } + /// Array indexes are u64s. This function casts values used as indexes to u64. + pub(super) fn make_array_index(&mut self, index: ValueId) -> ValueId { + self.builder.insert_cast(index, Type::unsigned(64)) + } + /// Define a local variable to be some Values that can later be retrieved /// by calling self.lookup(id) pub(super) fn define(&mut self, id: LocalId, value: Values) { @@ -987,12 +978,14 @@ impl<'a> FunctionContext<'a> { index: ValueId, location: Location, ) -> ValueId { - let element_size = self.builder.field_constant(self.element_size(array)); + let index = self.make_array_index(index); + let element_size = + self.builder.numeric_constant(self.element_size(array), Type::unsigned(64)); // The actual base index is the user's index * the array element type's size let mut index = self.builder.set_location(location).insert_binary(index, BinaryOp::Mul, element_size); - let one = self.builder.field_constant(FieldElement::one()); + let one = self.builder.numeric_constant(FieldElement::one(), Type::unsigned(64)); new_value.for_each(|value| { let value = value.eval(self); @@ -1132,9 +1125,8 @@ fn convert_operator(op: noirc_frontend::BinaryOpKind) -> BinaryOp { BinaryOpKind::And => BinaryOp::And, BinaryOpKind::Or => BinaryOp::Or, BinaryOpKind::Xor => BinaryOp::Xor, - BinaryOpKind::ShiftRight | BinaryOpKind::ShiftLeft => unreachable!( - "ICE - bit shift operators do not exist in SSA and should have been replaced" - ), + BinaryOpKind::ShiftLeft => BinaryOp::Shl, + BinaryOpKind::ShiftRight => BinaryOp::Shr, } } diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs index 259aab9f228..f3fa5d1d2f8 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs @@ -13,11 +13,8 @@ use noirc_frontend::{ }; use crate::{ - errors::RuntimeError, - ssa::{ - function_builder::data_bus::DataBusBuilder, - ir::{instruction::Intrinsic, types::NumericType}, - }, + errors::{InternalError, RuntimeError}, + ssa::{function_builder::data_bus::DataBusBuilder, ir::instruction::Intrinsic}, }; use self::{ @@ -29,7 +26,7 @@ use super::{ function_builder::data_bus::DataBus, ir::{ function::RuntimeType, - instruction::{BinaryOp, TerminatorInstruction}, + instruction::{BinaryOp, ConstrainError, Instruction, TerminatorInstruction}, types::Type, value::ValueId, }, @@ -38,7 +35,10 @@ use super::{ /// Generates SSA for the given monomorphized program. /// /// This function will generate the SSA but does not perform any optimizations on it. 
-pub(crate) fn generate_ssa(program: Program) -> Result { +pub(crate) fn generate_ssa( + program: Program, + force_brillig_runtime: bool, +) -> Result { // see which parameter has call_data/return_data attribute let is_databus = DataBusBuilder::is_databus(&program.main_function_signature); @@ -56,7 +56,11 @@ pub(crate) fn generate_ssa(program: Program) -> Result { let mut function_context = FunctionContext::new( main.name.clone(), &main.parameters, - if main.unconstrained { RuntimeType::Brillig } else { RuntimeType::Acir }, + if force_brillig_runtime || main.unconstrained { + RuntimeType::Brillig + } else { + RuntimeType::Acir + }, &context, ); @@ -141,7 +145,7 @@ impl<'a> FunctionContext<'a> { Expression::Call(call) => self.codegen_call(call), Expression::Let(let_expr) => self.codegen_let(let_expr), Expression::Constrain(expr, location, assert_message) => { - self.codegen_constrain(expr, *location, assert_message.clone()) + self.codegen_constrain(expr, *location, assert_message) } Expression::Assign(assign) => self.codegen_assign(assign), Expression::Semi(semi) => self.codegen_semi(semi), @@ -192,7 +196,7 @@ impl<'a> FunctionContext<'a> { } ast::Type::Slice(_) => { let slice_length = - self.builder.field_constant(array.contents.len() as u128); + self.builder.length_constant(array.contents.len() as u128); let slice_contents = self.codegen_array_checked(elements, typ[1].clone())?; Tree::Branch(vec![slice_length.into(), slice_contents]) @@ -217,7 +221,7 @@ impl<'a> FunctionContext<'a> { // A caller needs multiple pieces of information to make use of a format string // The message string, the number of fields to be formatted, and the fields themselves let string = self.codegen_string(string); - let field_count = self.builder.field_constant(*number_of_fields as u128); + let field_count = self.builder.length_constant(*number_of_fields as u128); let fields = self.codegen_expression(fields)?; Ok(Tree::Branch(vec![string, field_count.into(), fields])) @@ -383,8 +387,9 @@ impl<'a> FunctionContext<'a> { length: Option, ) -> Result { // base_index = index * type_size + let index = self.make_array_index(index); let type_size = Self::convert_type(element_type).size_of_type(); - let type_size = self.builder.field_constant(type_size as u128); + let type_size = self.builder.numeric_constant(type_size as u128, Type::unsigned(64)); let base_index = self.builder.set_location(location).insert_binary(index, BinaryOp::Mul, type_size); @@ -421,27 +426,18 @@ impl<'a> FunctionContext<'a> { index: super::ir::value::ValueId, length: Option, ) { - let array_len = length.expect("ICE: a length must be supplied for indexing slices"); - // Check the type of the index value for valid comparisons - let array_len = match self.builder.type_of_value(index) { - Type::Numeric(numeric_type) => match numeric_type { - // If the index itself is an integer, keep the array length as a Field - NumericType::Unsigned { .. } | NumericType::Signed { .. } => array_len, - // If the index and the array length are both Fields we will not be able to perform a less than comparison on them. 
- // Thus, we cast the array length to a u64 before performing the less than comparison - NumericType::NativeField => self - .builder - .insert_cast(array_len, Type::Numeric(NumericType::Unsigned { bit_size: 64 })), - }, - _ => unreachable!("ICE: array index must be a numeric type"), - }; + let index = self.make_array_index(index); + // We convert the length as an array index type for comparison + let array_len = self + .make_array_index(length.expect("ICE: a length must be supplied for indexing slices")); let is_offset_out_of_bounds = self.builder.insert_binary(index, BinaryOp::Lt, array_len); let true_const = self.builder.numeric_constant(true, Type::bool()); + self.builder.insert_constrain( is_offset_out_of_bounds, true_const, - Some("Index out of bounds".to_owned()), + Some(Box::new("Index out of bounds".to_owned().into())), ); } @@ -619,7 +615,7 @@ impl<'a> FunctionContext<'a> { { match intrinsic { Intrinsic::SliceInsert => { - let one = self.builder.field_constant(1u128); + let one = self.builder.length_constant(1u128); // We add one here in the case of a slice insert as a slice insert at the length of the slice // can be converted to a slice push back @@ -665,15 +661,63 @@ impl<'a> FunctionContext<'a> { &mut self, expr: &Expression, location: Location, - assert_message: Option, + assert_message: &Option>, ) -> Result { let expr = self.codegen_non_tuple_expression(expr)?; let true_literal = self.builder.numeric_constant(true, Type::bool()); - self.builder.set_location(location).insert_constrain(expr, true_literal, assert_message); + + // Set the location here for any errors that may occur when we codegen the assert message + self.builder.set_location(location); + + let assert_message = self.codegen_constrain_error(assert_message)?; + + self.builder.insert_constrain(expr, true_literal, assert_message); Ok(Self::unit_value()) } + // This method does not necessary codegen the full assert message expression, thus it does not + // return a `Values` object. Instead we check the internals of the expression to make sure + // we have an `Expression::Call` as expected. An `Instruction::Call` is then constructed but not + // inserted to the SSA as we want that instruction to be atomic in SSA with a constrain instruction. 
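For orientation before the helper that follows: a string-literal message is kept as compile-time data, while any other message is expected to reach this point already lowered to a call expression, and that call is deliberately captured without being inserted into the block so it stays paired with its constrain. Roughly (hypothetical source, shown only to illustrate the mapping):

    // assert(x == y);                        -- no message at all
    // assert(x == y, "x does not match y")   -> ConstrainError::Static("x does not match y")
    // assert(x == y, f"expected {y}")        -> non-literal message; it reaches this helper
    //                                           as a call and becomes
    //                                           ConstrainError::Dynamic(Instruction::Call { .. })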
+ fn codegen_constrain_error( + &mut self, + assert_message: &Option>, + ) -> Result>, RuntimeError> { + let Some(assert_message_expr) = assert_message else { return Ok(None) }; + + if let ast::Expression::Literal(ast::Literal::Str(assert_message)) = + assert_message_expr.as_ref() + { + return Ok(Some(Box::new(ConstrainError::Static(assert_message.to_string())))); + } + + let ast::Expression::Call(call) = assert_message_expr.as_ref() else { + return Err(InternalError::Unexpected { + expected: "Expected a call expression".to_owned(), + found: "Instead found {expr:?}".to_owned(), + call_stack: self.builder.get_call_stack(), + } + .into()); + }; + + let func = self.codegen_non_tuple_expression(&call.func)?; + let mut arguments = Vec::with_capacity(call.arguments.len()); + + for argument in &call.arguments { + let mut values = self.codegen_expression(argument)?.into_value_list(self); + arguments.append(&mut values); + } + + // If an array is passed as an argument we increase its reference count + for argument in &arguments { + self.builder.increment_array_reference_count(*argument); + } + + let instr = Instruction::Call { func, arguments }; + Ok(Some(Box::new(ConstrainError::Dynamic(instr)))) + } + fn codegen_assign(&mut self, assign: &ast::Assign) -> Result { let lhs = self.extract_current_value(&assign.lvalue)?; let rhs = self.codegen_expression(&assign.expression)?; diff --git a/compiler/noirc_frontend/Cargo.toml b/compiler/noirc_frontend/Cargo.toml index 80d767f7f2c..a3a8d460572 100644 --- a/compiler/noirc_frontend/Cargo.toml +++ b/compiler/noirc_frontend/Cargo.toml @@ -23,6 +23,7 @@ rustc-hash = "1.1.0" small-ord-set = "0.1.3" regex = "1.9.1" tracing.workspace = true +petgraph = "0.6" [dev-dependencies] strum = "0.24" diff --git a/compiler/noirc_frontend/src/ast/expression.rs b/compiler/noirc_frontend/src/ast/expression.rs index c78deaf6dbb..2a252633a29 100644 --- a/compiler/noirc_frontend/src/ast/expression.rs +++ b/compiler/noirc_frontend/src/ast/expression.rs @@ -236,7 +236,15 @@ impl BinaryOpKind { } pub fn is_valid_for_field_type(self) -> bool { - matches!(self, BinaryOpKind::Equal | BinaryOpKind::NotEqual) + matches!( + self, + BinaryOpKind::Add + | BinaryOpKind::Subtract + | BinaryOpKind::Multiply + | BinaryOpKind::Divide + | BinaryOpKind::Equal + | BinaryOpKind::NotEqual + ) } pub fn as_string(self) -> &'static str { @@ -280,14 +288,6 @@ impl BinaryOpKind { BinaryOpKind::Modulo => Token::Percent, } } - - pub fn is_bit_shift(&self) -> bool { - matches!(self, BinaryOpKind::ShiftRight | BinaryOpKind::ShiftLeft) - } - - pub fn is_modulo(&self) -> bool { - matches!(self, BinaryOpKind::Modulo) - } } #[derive(PartialEq, PartialOrd, Eq, Ord, Hash, Debug, Copy, Clone)] diff --git a/compiler/noirc_frontend/src/ast/function.rs b/compiler/noirc_frontend/src/ast/function.rs index f20fc54b101..46f0ac0fa0f 100644 --- a/compiler/noirc_frontend/src/ast/function.rs +++ b/compiler/noirc_frontend/src/ast/function.rs @@ -29,6 +29,7 @@ pub enum FunctionKind { Builtin, Normal, Oracle, + Recursive, } impl NoirFunction { @@ -106,6 +107,7 @@ impl From for NoirFunction { Some(FunctionAttribute::Foreign(_)) => FunctionKind::LowLevel, Some(FunctionAttribute::Test { .. 
}) => FunctionKind::Normal, Some(FunctionAttribute::Oracle(_)) => FunctionKind::Oracle, + Some(FunctionAttribute::Recursive) => FunctionKind::Recursive, None => FunctionKind::Normal, }; diff --git a/compiler/noirc_frontend/src/ast/mod.rs b/compiler/noirc_frontend/src/ast/mod.rs index d9af5893024..29edbaca594 100644 --- a/compiler/noirc_frontend/src/ast/mod.rs +++ b/compiler/noirc_frontend/src/ast/mod.rs @@ -28,6 +28,55 @@ use crate::{ }; use iter_extended::vecmap; +#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, Ord, PartialOrd)] +pub enum IntegerBitSize { + One, + Eight, + ThirtyTwo, + SixtyFour, +} + +impl IntegerBitSize { + pub fn allowed_sizes() -> Vec { + vec![Self::One, Self::Eight, Self::ThirtyTwo, Self::SixtyFour] + } +} + +impl From for u32 { + fn from(size: IntegerBitSize) -> u32 { + use IntegerBitSize::*; + match size { + One => 1, + Eight => 8, + ThirtyTwo => 32, + SixtyFour => 64, + } + } +} + +pub struct InvalidIntegerBitSizeError(pub u32); + +impl TryFrom for IntegerBitSize { + type Error = InvalidIntegerBitSizeError; + + fn try_from(value: u32) -> Result { + use IntegerBitSize::*; + match value { + 1 => Ok(One), + 8 => Ok(Eight), + 32 => Ok(ThirtyTwo), + 64 => Ok(SixtyFour), + _ => Err(InvalidIntegerBitSizeError(value)), + } + } +} + +impl core::fmt::Display for IntegerBitSize { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", u32::from(*self)) + } +} + /// The parser parses types as 'UnresolvedType's which /// require name resolution to resolve any type names used /// for structs within, but are otherwise identical to Types. @@ -35,7 +84,7 @@ use iter_extended::vecmap; pub enum UnresolvedTypeData { FieldElement, Array(Option, Box), // [4]Witness = Array(4, Witness) - Integer(Signedness, u32), // u32 = Integer(unsigned, 32) + Integer(Signedness, IntegerBitSize), // u32 = Integer(unsigned, ThirtyTwo) Bool, Expression(UnresolvedTypeExpression), String(Option), @@ -45,7 +94,7 @@ pub enum UnresolvedTypeData { Parenthesized(Box), /// A Named UnresolvedType can be a struct type or a type variable - Named(Path, Vec), + Named(Path, Vec, /*is_synthesized*/ bool), /// A Trait as return type or parameter of function, including its generics TraitAsType(Path, Vec), @@ -110,7 +159,7 @@ impl std::fmt::Display for UnresolvedTypeData { Signedness::Signed => write!(f, "i{num_bits}"), Signedness::Unsigned => write!(f, "u{num_bits}"), }, - Named(s, args) => { + Named(s, args, _) => { let args = vecmap(args, |arg| ToString::to_string(&arg.typ)); if args.is_empty() { write!(f, "{s}") @@ -179,6 +228,14 @@ impl std::fmt::Display for UnresolvedTypeExpression { } impl UnresolvedType { + pub fn is_synthesized(&self) -> bool { + match &self.typ { + UnresolvedTypeData::MutableReference(ty) => ty.is_synthesized(), + UnresolvedTypeData::Named(_, _, synthesized) => *synthesized, + _ => false, + } + } + pub fn without_span(typ: UnresolvedTypeData) -> UnresolvedType { UnresolvedType { typ, span: None } } @@ -189,11 +246,17 @@ impl UnresolvedType { } impl UnresolvedTypeData { - pub fn from_int_token(token: IntType) -> UnresolvedTypeData { + pub fn from_int_token( + token: IntType, + ) -> Result { use {IntType::*, UnresolvedTypeData::Integer}; match token { - Signed(num_bits) => Integer(Signedness::Signed, num_bits), - Unsigned(num_bits) => Integer(Signedness::Unsigned, num_bits), + Signed(num_bits) => { + Ok(Integer(Signedness::Signed, IntegerBitSize::try_from(num_bits)?)) + } + Unsigned(num_bits) => { + Ok(Integer(Signedness::Unsigned, 
IntegerBitSize::try_from(num_bits)?)) + } } } diff --git a/compiler/noirc_frontend/src/ast/statement.rs b/compiler/noirc_frontend/src/ast/statement.rs index 73b1f68778d..f39b71405d3 100644 --- a/compiler/noirc_frontend/src/ast/statement.rs +++ b/compiler/noirc_frontend/src/ast/statement.rs @@ -416,7 +416,7 @@ pub enum LValue { } #[derive(Debug, PartialEq, Eq, Clone)] -pub struct ConstrainStatement(pub Expression, pub Option, pub ConstrainKind); +pub struct ConstrainStatement(pub Expression, pub Option, pub ConstrainKind); #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum ConstrainKind { @@ -428,18 +428,22 @@ pub enum ConstrainKind { #[derive(Debug, PartialEq, Eq, Clone)] pub enum Pattern { Identifier(Ident), - Mutable(Box, Span), + Mutable(Box, Span, /*is_synthesized*/ bool), Tuple(Vec, Span), Struct(Path, Vec<(Ident, Pattern)>, Span), } impl Pattern { + pub fn is_synthesized(&self) -> bool { + matches!(self, Pattern::Mutable(_, _, true)) + } + pub fn span(&self) -> Span { match self { Pattern::Identifier(ident) => ident.span(), - Pattern::Mutable(_, span) | Pattern::Tuple(_, span) | Pattern::Struct(_, _, span) => { - *span - } + Pattern::Mutable(_, span, _) + | Pattern::Tuple(_, span) + | Pattern::Struct(_, _, span) => *span, } } pub fn name_ident(&self) -> &Ident { @@ -452,7 +456,7 @@ impl Pattern { pub(crate) fn into_ident(self) -> Ident { match self { Pattern::Identifier(ident) => ident, - Pattern::Mutable(pattern, _) => pattern.into_ident(), + Pattern::Mutable(pattern, _, _) => pattern.into_ident(), other => panic!("Pattern::into_ident called on {other} pattern with no identifier"), } } @@ -688,7 +692,7 @@ impl Display for Pattern { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Pattern::Identifier(name) => name.fmt(f), - Pattern::Mutable(name, _) => write!(f, "mut {name}"), + Pattern::Mutable(name, _, _) => write!(f, "mut {name}"), Pattern::Tuple(fields, _) => { let fields = vecmap(fields, ToString::to_string); write!(f, "({})", fields.join(", ")) diff --git a/compiler/noirc_frontend/src/ast/traits.rs b/compiler/noirc_frontend/src/ast/traits.rs index feb627df60b..775f0a5f2b4 100644 --- a/compiler/noirc_frontend/src/ast/traits.rs +++ b/compiler/noirc_frontend/src/ast/traits.rs @@ -48,7 +48,7 @@ pub struct TypeImpl { pub object_type: UnresolvedType, pub type_span: Span, pub generics: UnresolvedGenerics, - pub methods: Vec, + pub methods: Vec<(NoirFunction, Span)>, } /// Ast node for an implementation of a trait for a particular type @@ -101,7 +101,7 @@ impl Display for TypeImpl { writeln!(f, "impl{} {} {{", generics, self.object_type)?; - for method in self.methods.iter() { + for (method, _) in self.methods.iter() { let method = method.to_string(); for line in method.lines() { writeln!(f, " {line}")?; diff --git a/compiler/noirc_frontend/src/debug/mod.rs b/compiler/noirc_frontend/src/debug/mod.rs new file mode 100644 index 00000000000..a88567fcaf9 --- /dev/null +++ b/compiler/noirc_frontend/src/debug/mod.rs @@ -0,0 +1,615 @@ +use crate::parser::{parse_program, ParsedModule}; +use crate::{ + ast, + ast::{Path, PathKind}, + parser::{Item, ItemKind}, +}; +use noirc_errors::{Span, Spanned}; +use std::collections::HashMap; +use std::collections::VecDeque; + +const MAX_MEMBER_ASSIGN_DEPTH: usize = 8; + +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] +pub struct SourceVarId(pub u32); + +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] +pub struct SourceFieldId(pub u32); + +/// This structure is used to collect information about variables to track 
+/// for debugging during the instrumentation injection phase. +#[derive(Debug, Clone)] +pub struct DebugInstrumenter { + // all collected variable names while instrumenting the source for variable tracking + pub variables: HashMap, + + // all field names referenced when assigning to a member of a variable + pub field_names: HashMap, + + next_var_id: u32, + next_field_name_id: u32, + + // last seen variable names and their IDs grouped by scope + scope: Vec>, +} + +impl Default for DebugInstrumenter { + fn default() -> Self { + Self { + variables: HashMap::default(), + field_names: HashMap::default(), + scope: vec![], + next_var_id: 0, + next_field_name_id: 1, + } + } +} + +impl DebugInstrumenter { + pub fn instrument_module(&mut self, module: &mut ParsedModule) { + module.items.iter_mut().for_each(|item| { + if let Item { kind: ItemKind::Function(f), .. } = item { + self.walk_fn(&mut f.def); + } + }); + // this part absolutely must happen after ast traversal above + // so that oracle functions don't get wrapped, resulting in infinite recursion: + self.insert_state_set_oracle(module, 8); + } + + fn insert_var(&mut self, var_name: &str) -> SourceVarId { + let var_id = SourceVarId(self.next_var_id); + self.next_var_id += 1; + self.variables.insert(var_id, var_name.to_string()); + self.scope.last_mut().unwrap().insert(var_name.to_string(), var_id); + var_id + } + + fn lookup_var(&self, var_name: &str) -> Option { + self.scope.iter().rev().find_map(|vars| vars.get(var_name).copied()) + } + + fn insert_field_name(&mut self, field_name: &str) -> SourceFieldId { + let field_name_id = SourceFieldId(self.next_field_name_id); + self.next_field_name_id += 1; + self.field_names.insert(field_name_id, field_name.to_string()); + field_name_id + } + + fn walk_fn(&mut self, func: &mut ast::FunctionDefinition) { + self.scope.push(HashMap::default()); + + let set_fn_params = func + .parameters + .iter() + .flat_map(|param| { + pattern_vars(¶m.pattern) + .iter() + .map(|(id, _is_mut)| { + let var_id = self.insert_var(&id.0.contents); + build_assign_var_stmt(var_id, id_expr(id)) + }) + .collect::>() + }) + .collect(); + + self.walk_scope(&mut func.body.0, func.span); + + // prepend fn params: + func.body.0 = [set_fn_params, func.body.0.clone()].concat(); + } + + // Modify a vector of statements in-place, adding instrumentation for sets and drops. + // This function will consume a scope level. + fn walk_scope(&mut self, statements: &mut Vec, span: Span) { + statements.iter_mut().for_each(|stmt| self.walk_statement(stmt)); + + // extract and save the return value from the scope if there is one + let ret_stmt = statements.pop(); + let has_ret_expr = match ret_stmt { + None => false, + Some(ast::Statement { kind: ast::StatementKind::Expression(ret_expr), .. 
}) => { + let save_ret_expr = ast::Statement { + kind: ast::StatementKind::Let(ast::LetStatement { + pattern: ast::Pattern::Identifier(ident("__debug_expr", ret_expr.span)), + r#type: ast::UnresolvedType::unspecified(), + expression: ret_expr.clone(), + }), + span: ret_expr.span, + }; + statements.push(save_ret_expr); + true + } + Some(ret_stmt) => { + // not an expression, so leave it untouched + statements.push(ret_stmt); + false + } + }; + + let span = Span::empty(span.end()); + + // drop scope variables + let scope_vars = self.scope.pop().unwrap_or_default(); + let drop_vars_stmts = scope_vars.values().map(|var_id| build_drop_var_stmt(*var_id, span)); + statements.extend(drop_vars_stmts); + + // return the saved value in __debug_expr, or unit otherwise + let last_stmt = if has_ret_expr { + ast::Statement { + kind: ast::StatementKind::Expression(ast::Expression { + kind: ast::ExpressionKind::Variable(ast::Path { + segments: vec![ident("__debug_expr", span)], + kind: PathKind::Plain, + span, + }), + span, + }), + span, + } + } else { + ast::Statement { + kind: ast::StatementKind::Expression(ast::Expression { + kind: ast::ExpressionKind::Literal(ast::Literal::Unit), + span, + }), + span, + } + }; + statements.push(last_stmt); + } + + fn walk_let_statement(&mut self, let_stmt: &ast::LetStatement, span: &Span) -> ast::Statement { + // rewrites let statements written like this: + // let (((a,b,c),D { d }),e,f) = x; + // + // into statements like this: + // + // let (a,b,c,d,e,f,g) = { + // let (((a,b,c),D { d }),e,f) = x; + // wrap(1, a); + // wrap(2, b); + // ... + // wrap(6, f); + // (a,b,c,d,e,f,g) + // }; + + // a.b.c[3].x[i*4+1].z + + let vars = pattern_vars(&let_stmt.pattern); + let vars_pattern: Vec = vars + .iter() + .map(|(id, is_mut)| { + if *is_mut { + ast::Pattern::Mutable( + Box::new(ast::Pattern::Identifier(id.clone())), + id.span(), + true, + ) + } else { + ast::Pattern::Identifier(id.clone()) + } + }) + .collect(); + let vars_exprs: Vec = vars.iter().map(|(id, _)| id_expr(id)).collect(); + + let mut block_stmts = + vec![ast::Statement { kind: ast::StatementKind::Let(let_stmt.clone()), span: *span }]; + block_stmts.extend(vars.iter().map(|(id, _)| { + let var_id = self.insert_var(&id.0.contents); + build_assign_var_stmt(var_id, id_expr(id)) + })); + block_stmts.push(ast::Statement { + kind: ast::StatementKind::Expression(ast::Expression { + kind: ast::ExpressionKind::Tuple(vars_exprs), + span: let_stmt.pattern.span(), + }), + span: let_stmt.pattern.span(), + }); + + ast::Statement { + kind: ast::StatementKind::Let(ast::LetStatement { + pattern: ast::Pattern::Tuple(vars_pattern, let_stmt.pattern.span()), + r#type: ast::UnresolvedType::unspecified(), + expression: ast::Expression { + kind: ast::ExpressionKind::Block(ast::BlockExpression(block_stmts)), + span: let_stmt.expression.span, + }, + }), + span: *span, + } + } + + fn walk_assign_statement( + &mut self, + assign_stmt: &ast::AssignStatement, + span: &Span, + ) -> ast::Statement { + // X = Y becomes: + // X = { + // let __debug_expr = Y; + // + // __debug_var_assign(17, __debug_expr); + // // or: + // __debug_member_assign_{arity}(17, __debug_expr, _v0, _v1..., _v{arity}); + // + // __debug_expr + // }; + + let let_kind = ast::StatementKind::Let(ast::LetStatement { + pattern: ast::Pattern::Identifier(ident("__debug_expr", assign_stmt.expression.span)), + r#type: ast::UnresolvedType::unspecified(), + expression: assign_stmt.expression.clone(), + }); + let expression_span = assign_stmt.expression.span; + let new_assign_stmt = 
match &assign_stmt.lvalue { + ast::LValue::Ident(id) => { + let var_id = self + .lookup_var(&id.0.contents) + .unwrap_or_else(|| panic!("var lookup failed for var_name={}", &id.0.contents)); + build_assign_var_stmt(var_id, id_expr(&ident("__debug_expr", id.span()))) + } + ast::LValue::Dereference(_lv) => { + // TODO: this is a dummy statement for now, but we should + // somehow track the derefence and update the pointed to + // variable + ast::Statement { + kind: ast::StatementKind::Expression(uint_expr(0, *span)), + span: *span, + } + } + _ => { + let mut indexes = vec![]; + let mut cursor = &assign_stmt.lvalue; + let var_id; + loop { + match cursor { + ast::LValue::Ident(id) => { + var_id = self.lookup_var(&id.0.contents).unwrap_or_else(|| { + panic!("var lookup failed for var_name={}", &id.0.contents) + }); + break; + } + ast::LValue::MemberAccess { object, field_name } => { + cursor = object; + let field_name_id = self.insert_field_name(&field_name.0.contents); + indexes.push(sint_expr(-(field_name_id.0 as i128), expression_span)); + } + ast::LValue::Index { index, array } => { + cursor = array; + indexes.push(index.clone()); + } + ast::LValue::Dereference(_ref) => { + unimplemented![] + } + } + } + build_assign_member_stmt( + var_id, + &indexes, + &id_expr(&ident("__debug_expr", expression_span)), + ) + } + }; + + let ret_kind = + ast::StatementKind::Expression(id_expr(&ident("__debug_expr", expression_span))); + + ast::Statement { + kind: ast::StatementKind::Assign(ast::AssignStatement { + lvalue: assign_stmt.lvalue.clone(), + expression: ast::Expression { + kind: ast::ExpressionKind::Block(ast::BlockExpression(vec![ + ast::Statement { kind: let_kind, span: expression_span }, + new_assign_stmt, + ast::Statement { kind: ret_kind, span: expression_span }, + ])), + span: expression_span, + }, + }), + span: *span, + } + } + + fn walk_expr(&mut self, expr: &mut ast::Expression) { + match &mut expr.kind { + ast::ExpressionKind::Block(ast::BlockExpression(ref mut statements)) => { + self.scope.push(HashMap::default()); + self.walk_scope(statements, expr.span); + } + ast::ExpressionKind::Prefix(prefix_expr) => { + self.walk_expr(&mut prefix_expr.rhs); + } + ast::ExpressionKind::Index(index_expr) => { + self.walk_expr(&mut index_expr.collection); + self.walk_expr(&mut index_expr.index); + } + ast::ExpressionKind::Call(call_expr) => { + // TODO: push a stack frame or something here? 
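Circling back to `walk_assign_statement` above, a concrete reading of the member-assignment encoding (hypothetical ids, hedged from the code): field accesses are pushed as negative indices derived from their `SourceFieldId`, array accesses keep their index expression, and the collected path is reversed so the oracle call receives it in source order, from the base variable outward. Assuming `a` was registered with var id 3 and field `b` with field id 2, `a.b[i] = y` is rewritten roughly into:

    // a.b[i] = {
    //     let __debug_expr = y;
    //     __debug_member_assign_2(3, __debug_expr, -2, i);
    //     __debug_expr
    // };

That is arity 2 (one member access plus one array index), with the member encoded as `-2` and the index expression passed through unchanged.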
+ self.walk_expr(&mut call_expr.func); + call_expr.arguments.iter_mut().for_each(|ref mut expr| { + self.walk_expr(expr); + }); + } + ast::ExpressionKind::MethodCall(mc_expr) => { + // TODO: also push a stack frame here + self.walk_expr(&mut mc_expr.object); + mc_expr.arguments.iter_mut().for_each(|ref mut expr| { + self.walk_expr(expr); + }); + } + ast::ExpressionKind::Constructor(c_expr) => { + c_expr.fields.iter_mut().for_each(|(_id, ref mut expr)| { + self.walk_expr(expr); + }); + } + ast::ExpressionKind::MemberAccess(ma_expr) => { + self.walk_expr(&mut ma_expr.lhs); + } + ast::ExpressionKind::Cast(cast_expr) => { + self.walk_expr(&mut cast_expr.lhs); + } + ast::ExpressionKind::Infix(infix_expr) => { + self.walk_expr(&mut infix_expr.lhs); + self.walk_expr(&mut infix_expr.rhs); + } + ast::ExpressionKind::If(if_expr) => { + self.walk_expr(&mut if_expr.condition); + self.walk_expr(&mut if_expr.consequence); + if let Some(ref mut alt) = if_expr.alternative { + self.walk_expr(alt); + } + } + ast::ExpressionKind::Tuple(exprs) => { + exprs.iter_mut().for_each(|ref mut expr| { + self.walk_expr(expr); + }); + } + ast::ExpressionKind::Lambda(lambda) => { + self.walk_expr(&mut lambda.body); + } + ast::ExpressionKind::Parenthesized(expr) => { + self.walk_expr(expr); + } + _ => {} + } + } + + fn walk_for(&mut self, for_stmt: &mut ast::ForLoopStatement) { + let var_name = &for_stmt.identifier.0.contents; + let var_id = self.insert_var(var_name); + + let set_stmt = build_assign_var_stmt(var_id, id_expr(&for_stmt.identifier)); + let drop_stmt = build_drop_var_stmt(var_id, Span::empty(for_stmt.span.end())); + + self.walk_expr(&mut for_stmt.block); + for_stmt.block = ast::Expression { + kind: ast::ExpressionKind::Block(ast::BlockExpression(vec![ + set_stmt, + ast::Statement { + kind: ast::StatementKind::Semi(for_stmt.block.clone()), + span: for_stmt.block.span, + }, + drop_stmt, + ])), + span: for_stmt.span, + }; + } + + fn walk_statement(&mut self, stmt: &mut ast::Statement) { + match &mut stmt.kind { + ast::StatementKind::Let(let_stmt) => { + *stmt = self.walk_let_statement(let_stmt, &stmt.span); + } + ast::StatementKind::Assign(assign_stmt) => { + *stmt = self.walk_assign_statement(assign_stmt, &stmt.span); + } + ast::StatementKind::Expression(expr) => { + self.walk_expr(expr); + } + ast::StatementKind::Semi(expr) => { + self.walk_expr(expr); + } + ast::StatementKind::For(ref mut for_stmt) => { + self.walk_for(for_stmt); + } + _ => {} // Constrain, Error + } + } + + fn insert_state_set_oracle(&self, module: &mut ParsedModule, n: u32) { + let member_assigns = (1..=n) + .map(|i| format!["__debug_member_assign_{i}"]) + .collect::>() + .join(",\n"); + let (program, errors) = parse_program(&format!( + r#" + use dep::__debug::{{ + __debug_var_assign, + __debug_var_drop, + __debug_dereference_assign, + {member_assigns}, + }};"# + )); + if !errors.is_empty() { + panic!("errors parsing internal oracle definitions: {errors:?}") + } + module.items.extend(program.items); + } +} + +pub fn build_debug_crate_file() -> String { + [ + r#" + #[oracle(__debug_var_assign)] + unconstrained fn __debug_var_assign_oracle(_var_id: u32, _value: T) {} + unconstrained fn __debug_var_assign_inner(var_id: u32, value: T) { + __debug_var_assign_oracle(var_id, value); + } + pub fn __debug_var_assign(var_id: u32, value: T) { + __debug_var_assign_inner(var_id, value); + } + + #[oracle(__debug_var_drop)] + unconstrained fn __debug_var_drop_oracle(_var_id: u32) {} + unconstrained fn __debug_var_drop_inner(var_id: u32) { + 
__debug_var_drop_oracle(var_id); + } + pub fn __debug_var_drop(var_id: u32) { + __debug_var_drop_inner(var_id); + } + + #[oracle(__debug_dereference_assign)] + unconstrained fn __debug_dereference_assign_oracle(_var_id: u32, _value: T) {} + unconstrained fn __debug_dereference_assign_inner(var_id: u32, value: T) { + __debug_dereference_assign_oracle(var_id, value); + } + pub fn __debug_dereference_assign(var_id: u32, value: T) { + __debug_dereference_assign_inner(var_id, value); + } + "# + .to_string(), + (1..=MAX_MEMBER_ASSIGN_DEPTH) + .map(|n| { + // The variable signature has to be generic as Noir supports using any polymorphic integer as an index. + // If we were to set a specific type for index signatures here, such as `Field`, we will error in + // type checking if we attempt to index with a different type such as `u8`. + let var_sig = + (0..n).map(|i| format!["_v{i}: Index"]).collect::>().join(", "); + let vars = (0..n).map(|i| format!["_v{i}"]).collect::>().join(", "); + format!( + r#" + #[oracle(__debug_member_assign_{n})] + unconstrained fn __debug_oracle_member_assign_{n}( + _var_id: u32, _value: T, {var_sig} + ) {{}} + unconstrained fn __debug_inner_member_assign_{n}( + var_id: u32, value: T, {var_sig} + ) {{ + __debug_oracle_member_assign_{n}(var_id, value, {vars}); + }} + pub fn __debug_member_assign_{n}(var_id: u32, value: T, {var_sig}) {{ + __debug_inner_member_assign_{n}(var_id, value, {vars}); + }} + + "# + ) + }) + .collect::>() + .join("\n"), + ] + .join("\n") +} + +fn build_assign_var_stmt(var_id: SourceVarId, expr: ast::Expression) -> ast::Statement { + let span = expr.span; + let kind = ast::ExpressionKind::Call(Box::new(ast::CallExpression { + func: Box::new(ast::Expression { + kind: ast::ExpressionKind::Variable(ast::Path { + segments: vec![ident("__debug_var_assign", span)], + kind: PathKind::Plain, + span, + }), + span, + }), + arguments: vec![uint_expr(var_id.0 as u128, span), expr], + })); + ast::Statement { kind: ast::StatementKind::Semi(ast::Expression { kind, span }), span } +} + +fn build_drop_var_stmt(var_id: SourceVarId, span: Span) -> ast::Statement { + let kind = ast::ExpressionKind::Call(Box::new(ast::CallExpression { + func: Box::new(ast::Expression { + kind: ast::ExpressionKind::Variable(ast::Path { + segments: vec![ident("__debug_var_drop", span)], + kind: PathKind::Plain, + span, + }), + span, + }), + arguments: vec![uint_expr(var_id.0 as u128, span)], + })); + ast::Statement { kind: ast::StatementKind::Semi(ast::Expression { kind, span }), span } +} + +fn build_assign_member_stmt( + var_id: SourceVarId, + indexes: &[ast::Expression], + expr: &ast::Expression, +) -> ast::Statement { + let arity = indexes.len(); + if arity > MAX_MEMBER_ASSIGN_DEPTH { + unreachable!("Assignment to member exceeds maximum depth for debugging"); + } + let span = expr.span; + let kind = ast::ExpressionKind::Call(Box::new(ast::CallExpression { + func: Box::new(ast::Expression { + kind: ast::ExpressionKind::Variable(ast::Path { + segments: vec![ident(&format!["__debug_member_assign_{arity}"], span)], + kind: PathKind::Plain, + span, + }), + span, + }), + arguments: [ + vec![uint_expr(var_id.0 as u128, span)], + vec![expr.clone()], + indexes.iter().rev().cloned().collect(), + ] + .concat(), + })); + ast::Statement { kind: ast::StatementKind::Semi(ast::Expression { kind, span }), span } +} + +fn pattern_vars(pattern: &ast::Pattern) -> Vec<(ast::Ident, bool)> { + let mut vars = vec![]; + let mut stack = VecDeque::from([(pattern, false)]); + while stack.front().is_some() { + 
let (pattern, is_mut) = stack.pop_front().unwrap(); + match pattern { + ast::Pattern::Identifier(id) => { + vars.push((id.clone(), is_mut)); + } + ast::Pattern::Mutable(pattern, _, _) => { + stack.push_back((pattern, true)); + } + ast::Pattern::Tuple(patterns, _) => { + stack.extend(patterns.iter().map(|pattern| (pattern, false))); + } + ast::Pattern::Struct(_, pids, _) => { + stack.extend(pids.iter().map(|(_, pattern)| (pattern, is_mut))); + vars.extend(pids.iter().map(|(id, _)| (id.clone(), false))); + } + } + } + vars +} + +fn ident(s: &str, span: Span) -> ast::Ident { + ast::Ident(Spanned::from(span, s.to_string())) +} + +fn id_expr(id: &ast::Ident) -> ast::Expression { + ast::Expression { + kind: ast::ExpressionKind::Variable(Path { + segments: vec![id.clone()], + kind: PathKind::Plain, + span: id.span(), + }), + span: id.span(), + } +} + +fn uint_expr(x: u128, span: Span) -> ast::Expression { + ast::Expression { + kind: ast::ExpressionKind::Literal(ast::Literal::Integer(x.into(), false)), + span, + } +} + +fn sint_expr(x: i128, span: Span) -> ast::Expression { + ast::Expression { + kind: ast::ExpressionKind::Literal(ast::Literal::Integer(x.abs().into(), x < 0)), + span, + } +} diff --git a/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs b/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs index a6ab6b1d825..7f36af5b30e 100644 --- a/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs +++ b/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs @@ -14,8 +14,8 @@ use crate::hir::resolution::{ use crate::hir::type_check::{type_check_func, TypeCheckError, TypeChecker}; use crate::hir::Context; -use crate::macros_api::MacroProcessor; -use crate::node_interner::{FuncId, NodeInterner, StmtId, StructId, TraitId, TypeAliasId}; +use crate::macros_api::{MacroError, MacroProcessor}; +use crate::node_interner::{FuncId, GlobalId, NodeInterner, StructId, TraitId, TypeAliasId}; use crate::parser::{ParserError, SortedModule}; use crate::{ @@ -109,7 +109,7 @@ pub struct UnresolvedTypeAlias { pub struct UnresolvedGlobal { pub file_id: FileId, pub module_id: LocalModuleId, - pub stmt_id: StmtId, + pub global_id: GlobalId, pub stmt_def: LetStatement, } @@ -155,6 +155,12 @@ impl From for CustomDiagnostic { } } +impl From for CompilationError { + fn from(value: MacroError) -> Self { + CompilationError::DefinitionError(DefCollectorErrorKind::MacroError(value)) + } +} + impl From for CompilationError { fn from(value: ParserError) -> Self { CompilationError::ParseError(value) @@ -250,6 +256,20 @@ impl DefCollector { // Add the current crate to the collection of DefMaps context.def_maps.insert(crate_id, def_collector.def_map); + // TODO(#4653): generalize this function + for macro_processor in ¯o_processors { + macro_processor + .process_unresolved_traits_impls( + &crate_id, + context, + &def_collector.collected_traits_impls, + &mut def_collector.collected_functions, + ) + .unwrap_or_else(|(macro_err, file_id)| { + errors.push((macro_err.into(), file_id)); + }); + } + inject_prelude(crate_id, context, crate_root, &mut def_collector.collected_imports); for submodule in submodules { inject_prelude( @@ -311,9 +331,10 @@ impl DefCollector { // Must resolve structs before we resolve globals. 
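This change adds `petgraph` as a frontend dependency and, in the resolution pipeline below, collects errors from a new `check_for_dependency_cycles` pass on the interner. A minimal, self-contained sketch of how such a cycle can be found with petgraph 0.6 (an assumed shape only, not the interner's actual implementation):

    use petgraph::algo::{is_cyclic_directed, tarjan_scc};
    use petgraph::graph::DiGraph;

    fn main() {
        // Definitions become nodes; "depends on" edges connect them.
        let mut graph = DiGraph::<&str, ()>::new();
        let a = graph.add_node("global A");
        let b = graph.add_node("global B");
        graph.add_edge(a, b, ()); // A depends on B
        graph.add_edge(b, a, ()); // B depends on A -> cycle

        assert!(is_cyclic_directed(&graph));

        // Any strongly connected component with more than one node is a cycle
        // (self-loops are ignored here for brevity).
        let cycle: Vec<&str> = tarjan_scc(&graph)
            .into_iter()
            .find(|scc| scc.len() > 1)
            .map(|scc| scc.into_iter().map(|ix| graph[ix]).collect())
            .unwrap_or_default();
        println!("dependency cycle: {}", cycle.join(" -> "));
    }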
errors.extend(resolve_structs(context, def_collector.collected_types, crate_id)); - // We must wait to resolve non-integer globals until after we resolve structs since structs + // We must wait to resolve non-integer globals until after we resolve structs since struct // globals will need to reference the struct type they're initialized to to ensure they are valid. resolved_globals.extend(resolve_globals(context, other_globals, crate_id)); + errors.extend(resolved_globals.errors); // Bind trait impls to their trait. Collect trait functions, that have a // default implementation, which hasn't been overridden. @@ -332,41 +353,44 @@ impl DefCollector { // over trait methods if there are name conflicts. errors.extend(collect_impls(context, crate_id, &def_collector.collected_impls)); - // Lower each function in the crate. This is now possible since imports have been resolved - let file_func_ids = resolve_free_functions( + // Resolve each function in the crate. This is now possible since imports have been resolved + let mut functions = Vec::new(); + functions.extend(resolve_free_functions( &mut context.def_interner, crate_id, &context.def_maps, def_collector.collected_functions, None, &mut errors, - ); + )); - let file_method_ids = resolve_impls( + functions.extend(resolve_impls( &mut context.def_interner, crate_id, &context.def_maps, def_collector.collected_impls, &mut errors, - ); - let file_trait_impls_ids = resolve_trait_impls( + )); + + functions.extend(resolve_trait_impls( context, def_collector.collected_traits_impls, crate_id, &mut errors, - ); - - errors.extend(resolved_globals.errors); + )); for macro_processor in macro_processors { - macro_processor.process_typed_ast(&crate_id, context); + macro_processor.process_typed_ast(&crate_id, context).unwrap_or_else( + |(macro_err, file_id)| { + errors.push((macro_err.into(), file_id)); + }, + ); } - errors.extend(type_check_globals(&mut context.def_interner, resolved_globals.globals)); - // Type check all of the functions in the crate - errors.extend(type_check_functions(&mut context.def_interner, file_func_ids)); - errors.extend(type_check_functions(&mut context.def_interner, file_method_ids)); - errors.extend(type_check_functions(&mut context.def_interner, file_trait_impls_ids)); + errors.extend(context.def_interner.check_for_dependency_cycles()); + + errors.extend(type_check_globals(&mut context.def_interner, resolved_globals.globals)); + errors.extend(type_check_functions(&mut context.def_interner, functions)); errors } } @@ -426,15 +450,15 @@ fn filter_literal_globals( fn type_check_globals( interner: &mut NodeInterner, - global_ids: Vec<(FileId, StmtId)>, + global_ids: Vec<(FileId, GlobalId)>, ) -> Vec<(CompilationError, fm::FileId)> { global_ids - .iter() - .flat_map(|(file_id, stmt_id)| { - TypeChecker::check_global(stmt_id, interner) + .into_iter() + .flat_map(|(file_id, global_id)| { + TypeChecker::check_global(global_id, interner) .iter() .cloned() - .map(|e| (e.into(), *file_id)) + .map(|e| (e.into(), file_id)) .collect::>() }) .collect() @@ -445,12 +469,12 @@ fn type_check_functions( file_func_ids: Vec<(FileId, FuncId)>, ) -> Vec<(CompilationError, fm::FileId)> { file_func_ids - .iter() + .into_iter() .flat_map(|(file, func)| { - type_check_func(interner, *func) + type_check_func(interner, func) .iter() .cloned() - .map(|e| (e.into(), *file)) + .map(|e| (e.into(), file)) .collect::>() }) .collect() diff --git a/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs b/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs index 
3cd60c33b8b..77224cc311c 100644 --- a/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs +++ b/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs @@ -89,13 +89,12 @@ impl<'a> ModCollector<'a> { for global in globals { let name = global.pattern.name_ident().clone(); - // First create dummy function in the DefInterner - // So that we can get a StmtId - let stmt_id = context.def_interner.push_empty_global(); + let global_id = + context.def_interner.push_empty_global(name.clone(), self.module_id, self.file_id); // Add the statement to the scope so its path can be looked up later - let result = - self.def_collector.def_map.modules[self.module_id.0].declare_global(name, stmt_id); + let result = self.def_collector.def_map.modules[self.module_id.0] + .declare_global(name, global_id); if let Err((first_def, second_def)) = result { let err = DefCollectorErrorKind::Duplicate { @@ -109,7 +108,7 @@ impl<'a> ModCollector<'a> { self.def_collector.collected_globals.push(UnresolvedGlobal { file_id: self.file_id, module_id: self.module_id, - stmt_id, + global_id, stmt_def: global, }); } @@ -126,7 +125,7 @@ impl<'a> ModCollector<'a> { trait_id: None, }; - for method in r#impl.methods { + for (method, _) in r#impl.methods { let func_id = context.def_interner.push_empty_fn(); let location = Location::new(method.span(), self.file_id); context.def_interner.push_function(func_id, &method.def, module_id, location); @@ -440,11 +439,15 @@ impl<'a> ModCollector<'a> { } } TraitItem::Constant { name, .. } => { - let stmt_id = context.def_interner.push_empty_global(); + let global_id = context.def_interner.push_empty_global( + name.clone(), + trait_id.0.local_id, + self.file_id, + ); if let Err((first_def, second_def)) = self.def_collector.def_map.modules [trait_id.0.local_id.0] - .declare_global(name.clone(), stmt_id) + .declare_global(name.clone(), global_id) { let error = DefCollectorErrorKind::Duplicate { typ: DuplicateType::TraitAssociatedConst, diff --git a/compiler/noirc_frontend/src/hir/def_map/mod.rs b/compiler/noirc_frontend/src/hir/def_map/mod.rs index 8c985e88e0b..8721bdb6c3c 100644 --- a/compiler/noirc_frontend/src/hir/def_map/mod.rs +++ b/compiler/noirc_frontend/src/hir/def_map/mod.rs @@ -31,7 +31,7 @@ pub struct LocalModuleId(pub Index); impl LocalModuleId { pub fn dummy_id() -> LocalModuleId { - LocalModuleId(Index::from_raw_parts(std::usize::MAX, std::u64::MAX)) + LocalModuleId(Index::dummy()) } } @@ -135,6 +135,11 @@ impl CrateDefMap { pub fn modules(&self) -> &Arena { &self.modules } + + pub fn modules_mut(&mut self) -> &mut Arena { + &mut self.modules + } + pub fn krate(&self) -> CrateId { self.krate } diff --git a/compiler/noirc_frontend/src/hir/def_map/module_data.rs b/compiler/noirc_frontend/src/hir/def_map/module_data.rs index fbb5e5cf741..309618dd011 100644 --- a/compiler/noirc_frontend/src/hir/def_map/module_data.rs +++ b/compiler/noirc_frontend/src/hir/def_map/module_data.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use noirc_errors::Location; use crate::{ - node_interner::{FuncId, StmtId, StructId, TraitId, TypeAliasId}, + node_interner::{FuncId, GlobalId, StructId, TraitId, TypeAliasId}, Ident, }; @@ -76,7 +76,7 @@ impl ModuleData { self.definitions.remove_definition(name); } - pub fn declare_global(&mut self, name: Ident, id: StmtId) -> Result<(), (Ident, Ident)> { + pub fn declare_global(&mut self, name: Ident, id: GlobalId) -> Result<(), (Ident, Ident)> { self.declare(name, id.into(), None) } diff --git a/compiler/noirc_frontend/src/hir/def_map/module_def.rs 
b/compiler/noirc_frontend/src/hir/def_map/module_def.rs index 3e5629639fa..54d092f9515 100644 --- a/compiler/noirc_frontend/src/hir/def_map/module_def.rs +++ b/compiler/noirc_frontend/src/hir/def_map/module_def.rs @@ -1,4 +1,4 @@ -use crate::node_interner::{FuncId, StmtId, StructId, TraitId, TypeAliasId}; +use crate::node_interner::{FuncId, GlobalId, StructId, TraitId, TypeAliasId}; use super::ModuleId; @@ -10,7 +10,7 @@ pub enum ModuleDefId { TypeId(StructId), TypeAliasId(TypeAliasId), TraitId(TraitId), - GlobalId(StmtId), + GlobalId(GlobalId), } impl ModuleDefId { @@ -42,9 +42,9 @@ impl ModuleDefId { } } - pub fn as_global(&self) -> Option { + pub fn as_global(&self) -> Option { match self { - ModuleDefId::GlobalId(stmt_id) => Some(*stmt_id), + ModuleDefId::GlobalId(global_id) => Some(*global_id), _ => None, } } @@ -88,9 +88,9 @@ impl From for ModuleDefId { } } -impl From for ModuleDefId { - fn from(stmt_id: StmtId) -> Self { - ModuleDefId::GlobalId(stmt_id) +impl From for ModuleDefId { + fn from(global_id: GlobalId) -> Self { + ModuleDefId::GlobalId(global_id) } } @@ -162,13 +162,13 @@ impl TryFromModuleDefId for TraitId { } } -impl TryFromModuleDefId for StmtId { +impl TryFromModuleDefId for GlobalId { fn try_from(id: ModuleDefId) -> Option { id.as_global() } fn dummy_id() -> Self { - StmtId::dummy_id() + GlobalId::dummy_id() } fn description() -> String { diff --git a/compiler/noirc_frontend/src/hir/mod.rs b/compiler/noirc_frontend/src/hir/mod.rs index 2124b5281f4..00bcb0cdebf 100644 --- a/compiler/noirc_frontend/src/hir/mod.rs +++ b/compiler/noirc_frontend/src/hir/mod.rs @@ -4,6 +4,7 @@ pub mod resolution; pub mod scope; pub mod type_check; +use crate::debug::DebugInstrumenter; use crate::graph::{CrateGraph, CrateId}; use crate::hir_def::function::FuncMeta; use crate::node_interner::{FuncId, NodeInterner, StructId}; @@ -31,6 +32,8 @@ pub struct Context<'file_manager, 'parsed_files> { // is read-only however, once it has been passed to the Context. pub file_manager: Cow<'file_manager, FileManager>, + pub debug_instrumenter: DebugInstrumenter, + /// A map of each file that already has been visited from a prior `mod foo;` declaration. /// This is used to issue an error if a second `mod foo;` is declared to the same file. 
pub visited_files: BTreeMap, @@ -56,6 +59,7 @@ impl Context<'_, '_> { visited_files: BTreeMap::new(), crate_graph: CrateGraph::default(), file_manager: Cow::Owned(file_manager), + debug_instrumenter: DebugInstrumenter::default(), parsed_files: Cow::Owned(parsed_files), } } @@ -70,6 +74,7 @@ impl Context<'_, '_> { visited_files: BTreeMap::new(), crate_graph: CrateGraph::default(), file_manager: Cow::Borrowed(file_manager), + debug_instrumenter: DebugInstrumenter::default(), parsed_files: Cow::Borrowed(parsed_files), } } @@ -86,6 +91,10 @@ impl Context<'_, '_> { self.def_maps.get(crate_id) } + pub fn def_map_mut(&mut self, crate_id: &CrateId) -> Option<&mut CrateDefMap> { + self.def_maps.get_mut(crate_id) + } + /// Return the CrateId for each crate that has been compiled /// successfully pub fn crates(&self) -> impl Iterator + '_ { diff --git a/compiler/noirc_frontend/src/hir/resolution/errors.rs b/compiler/noirc_frontend/src/hir/resolution/errors.rs index 390807afd17..d2fe67da38c 100644 --- a/compiler/noirc_frontend/src/hir/resolution/errors.rs +++ b/compiler/noirc_frontend/src/hir/resolution/errors.rs @@ -84,6 +84,12 @@ pub enum ResolverError { InvalidTypeForEntryPoint { span: Span }, #[error("Nested slices are not supported")] NestedSlices { span: Span }, + #[error("#[recursive] attribute is only allowed on entry points to a program")] + MisplacedRecursiveAttribute { ident: Ident }, + #[error("Usage of the `#[foreign]` or `#[builtin]` function attributes are not allowed outside of the Noir standard library")] + LowLevelFunctionOutsideOfStdlib { ident: Ident }, + #[error("Dependency cycle found, '{item}' recursively depends on itself: {cycle} ")] + DependencyCycle { span: Span, item: String, cycle: String }, } impl ResolverError { @@ -138,33 +144,33 @@ impl From for Diagnostic { field.span(), ), ResolverError::NoSuchField { field, struct_definition } => { - let mut error = Diagnostic::simple_error( + Diagnostic::simple_error( format!("no such field {field} defined in struct {struct_definition}"), String::new(), field.span(), - ); - - error.add_secondary( - format!("{struct_definition} defined here with no {field} field"), - struct_definition.span(), - ); - error + ) } - ResolverError::MissingFields { span, missing_fields, struct_definition } => { + ResolverError::MissingFields { span, mut missing_fields, struct_definition } => { let plural = if missing_fields.len() != 1 { "s" } else { "" }; - let missing_fields = missing_fields.join(", "); + let remaining_fields_names = match &missing_fields[..] 
{ + [field1] => field1.clone(), + [field1, field2] => format!("{field1} and {field2}"), + [field1, field2, field3] => format!("{field1}, {field2} and {field3}"), + _ => { + let len = missing_fields.len() - 3; + let len_plural = if len != 1 {"s"} else {""}; - let mut error = Diagnostic::simple_error( - format!("missing field{plural}: {missing_fields}"), + let truncated_fields = format!(" and {len} other field{len_plural}"); + missing_fields.truncate(3); + format!("{}{truncated_fields}", missing_fields.join(", ")) + } + }; + + Diagnostic::simple_error( + format!("missing field{plural} {remaining_fields_names} in struct {struct_definition}"), String::new(), span, - ); - - error.add_secondary( - format!("{struct_definition} defined here"), - struct_definition.span(), - ); - error + ) } ResolverError::UnnecessaryMut { first_mut, second_mut } => { let mut error = Diagnostic::simple_error( @@ -311,6 +317,30 @@ impl From for Diagnostic { "Try to use a constant sized array instead".into(), span, ), + ResolverError::MisplacedRecursiveAttribute { ident } => { + let name = &ident.0.contents; + + let mut diag = Diagnostic::simple_error( + format!("misplaced #[recursive] attribute on function {name} rather than the main function"), + "misplaced #[recursive] attribute".to_string(), + ident.0.span(), + ); + + diag.add_note("The `#[recursive]` attribute specifies to the backend whether it should use a prover which generates proofs that are friendly for recursive verification in another circuit".to_owned()); + diag + } + ResolverError::LowLevelFunctionOutsideOfStdlib { ident } => Diagnostic::simple_error( + "Definition of low-level function outside of standard library".into(), + "Usage of the `#[foreign]` or `#[builtin]` function attributes are not allowed outside of the Noir standard library".into(), + ident.span(), + ), + ResolverError::DependencyCycle { span, item, cycle } => { + Diagnostic::simple_error( + "Dependency cycle found".into(), + format!("'{item}' recursively depends on itself: {cycle}"), + span, + ) + }, } } } diff --git a/compiler/noirc_frontend/src/hir/resolution/globals.rs b/compiler/noirc_frontend/src/hir/resolution/globals.rs index b5aec212dbf..9fb31271727 100644 --- a/compiler/noirc_frontend/src/hir/resolution/globals.rs +++ b/compiler/noirc_frontend/src/hir/resolution/globals.rs @@ -6,13 +6,13 @@ use crate::{ def_map::ModuleId, Context, }, - node_interner::StmtId, + node_interner::GlobalId, }; use fm::FileId; use iter_extended::vecmap; pub(crate) struct ResolvedGlobals { - pub(crate) globals: Vec<(FileId, StmtId)>, + pub(crate) globals: Vec<(FileId, GlobalId)>, pub(crate) errors: Vec<(CompilationError, FileId)>, } @@ -40,16 +40,13 @@ pub(crate) fn resolve_globals( global.file_id, ); - let name = global.stmt_def.pattern.name_ident().clone(); - - let hir_stmt = resolver.resolve_global_let(global.stmt_def); + let hir_stmt = resolver.resolve_global_let(global.stmt_def, global.global_id); errors.extend(take_errors(global.file_id, resolver)); - context.def_interner.update_global(global.stmt_id, hir_stmt); - - context.def_interner.push_global(global.stmt_id, name, global.module_id); + let statement_id = context.def_interner.get_global(global.global_id).let_statement; + context.def_interner.replace_statement(statement_id, hir_stmt); - (global.file_id, global.stmt_id) + (global.file_id, global.global_id) }); ResolvedGlobals { globals, errors } } diff --git a/compiler/noirc_frontend/src/hir/resolution/resolver.rs b/compiler/noirc_frontend/src/hir/resolution/resolver.rs index 
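The rewritten `MissingFields` diagnostic above truncates long field lists to the first three names plus a count of the rest. A small sketch of the same formatting, operating on plain strings instead of the resolver's idents (the helper name is hypothetical):

```rust
/// Builds a truncated "missing fields" list: at most three names, then a count.
fn format_missing_fields(mut missing: Vec<String>) -> String {
    match &missing[..] {
        [] => String::new(),
        [a] => a.clone(),
        [a, b] => format!("{a} and {b}"),
        [a, b, c] => format!("{a}, {b} and {c}"),
        _ => {
            let extra = missing.len() - 3;
            let plural = if extra != 1 { "s" } else { "" };
            missing.truncate(3);
            format!("{} and {extra} other field{plural}", missing.join(", "))
        }
    }
}

fn main() {
    let fields: Vec<String> = ["x", "y", "z", "w", "v"].iter().map(|f| f.to_string()).collect();
    // Prints: x, y, z and 2 other fields
    println!("{}", format_missing_fields(fields));
}
```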
df533f6a4ae..7f9e48353a7 100644 --- a/compiler/noirc_frontend/src/hir/resolution/resolver.rs +++ b/compiler/noirc_frontend/src/hir/resolution/resolver.rs @@ -28,8 +28,8 @@ use crate::graph::CrateId; use crate::hir::def_map::{LocalModuleId, ModuleDefId, TryFromModuleDefId, MAIN_FUNCTION}; use crate::hir_def::stmt::{HirAssignStatement, HirForStatement, HirLValue, HirPattern}; use crate::node_interner::{ - DefinitionId, DefinitionKind, ExprId, FuncId, NodeInterner, StmtId, StructId, TraitId, - TraitImplId, TraitMethodId, + DefinitionId, DefinitionKind, DependencyId, ExprId, FuncId, GlobalId, NodeInterner, StmtId, + StructId, TraitId, TraitImplId, TraitMethodId, TypeAliasId, }; use crate::{ hir::{def_map::CrateDefMap, resolution::path_resolver::PathResolver}, @@ -39,9 +39,9 @@ use crate::{ use crate::{ ArrayLiteral, ContractFunctionType, Distinctness, ForRange, FunctionDefinition, FunctionReturnType, FunctionVisibility, Generics, LValue, NoirStruct, NoirTypeAlias, Param, - Path, PathKind, Pattern, Shared, StructType, Type, TypeAliasType, TypeVariable, - TypeVariableKind, UnaryOp, UnresolvedGenerics, UnresolvedTraitConstraint, UnresolvedType, - UnresolvedTypeData, UnresolvedTypeExpression, Visibility, ERROR_IDENT, + Path, PathKind, Pattern, Shared, StructType, Type, TypeAlias, TypeVariable, TypeVariableKind, + UnaryOp, UnresolvedGenerics, UnresolvedTraitConstraint, UnresolvedType, UnresolvedTypeData, + UnresolvedTypeExpression, Visibility, ERROR_IDENT, }; use fm::FileId; use iter_extended::vecmap; @@ -93,6 +93,10 @@ pub struct Resolver<'a> { /// to the corresponding trait impl ID. current_trait_impl: Option, + /// The current dependency item we're resolving. + /// Used to link items to their dependencies in the dependency graph + current_item: Option, + /// True if the current module is a contract. /// This is usually determined by self.path_resolver.module_id(), but it can /// be overridden for impls. Impls are an odd case since the methods within resolve @@ -148,6 +152,7 @@ impl<'a> Resolver<'a> { errors: Vec::new(), lambda_stack: Vec::new(), current_trait_impl: None, + current_item: None, file, in_contract, } @@ -184,6 +189,7 @@ impl<'a> Resolver<'a> { func_id: FuncId, ) -> (HirFunction, FuncMeta, Vec) { self.scopes.start_function(); + self.current_item = Some(DependencyId::Function(func_id)); // Check whether the function has globals in the local module and add them to the scope self.resolve_local_globals(); @@ -191,10 +197,18 @@ impl<'a> Resolver<'a> { self.add_generics(&func.def.generics); self.trait_bounds = func.def.where_clause.clone(); + let is_low_level_or_oracle = func + .attributes() + .function + .as_ref() + .map_or(false, |func| func.is_low_level() || func.is_oracle()); let (hir_func, func_meta) = self.intern_function(func, func_id); let func_scope_tree = self.scopes.end_function(); - self.check_for_unused_variables_in_scope_tree(func_scope_tree); + // The arguments to low-level and oracle functions are always unused so we do not produce warnings for them. + if !is_low_level_or_oracle { + self.check_for_unused_variables_in_scope_tree(func_scope_tree); + } self.trait_bounds.clear(); (hir_func, func_meta, self.errors) @@ -330,21 +344,22 @@ impl<'a> Resolver<'a> { // This check is necessary to maintain the same definition ids in the interner. Currently, each function uses a new resolver that has its own ScopeForest and thus global scope. 
// We must first check whether an existing definition ID has been inserted as otherwise there will be multiple definitions for the same global statement. // This leads to an error in evaluation where the wrong definition ID is selected when evaluating a statement using the global. The check below prevents this error. - let mut stmt_id = None; + let mut global_id = None; let global = self.interner.get_all_globals(); - for (global_stmt_id, global_info) in global { + for global_info in global { if global_info.ident == name && global_info.local_id == self.path_resolver.local_module_id() { - stmt_id = Some(global_stmt_id); + global_id = Some(global_info.id); } } - let (ident, resolver_meta) = if let Some(id) = stmt_id { - let hir_let_stmt = self.interner.let_statement(&id); - let ident = hir_let_stmt.ident(); + let (ident, resolver_meta) = if let Some(id) = global_id { + let global = self.interner.get_global(id); + let hir_ident = HirIdent::non_trait_method(global.definition_id, global.location); + let ident = hir_ident.clone(); let resolver_meta = ResolverMeta { num_times_used: 0, ident, warn_if_unused: true }; - (hir_let_stmt.ident(), resolver_meta) + (hir_ident, resolver_meta) } else { let location = Location::new(name.span(), self.file); let id = @@ -406,7 +421,7 @@ impl<'a> Resolver<'a> { FunctionKind::Builtin | FunctionKind::LowLevel | FunctionKind::Oracle => { HirFunction::empty() } - FunctionKind::Normal => { + FunctionKind::Normal | FunctionKind::Recursive => { let expr_id = self.intern_block(func.def.body); self.interner.push_expr_location(expr_id, func.def.span, self.file); HirFunction::unchecked_from_expr(expr_id) @@ -475,7 +490,7 @@ impl<'a> Resolver<'a> { Unit => Type::Unit, Unspecified => Type::Error, Error => Type::Error, - Named(path, args) => self.resolve_named_type(path, args, new_variables), + Named(path, args, _) => self.resolve_named_type(path, args, new_variables), TraitAsType(path, args) => self.resolve_trait_as_type(path, args, new_variables), Tuple(fields) => { @@ -558,16 +573,19 @@ impl<'a> Resolver<'a> { let span = path.span(); let mut args = vecmap(args, |arg| self.resolve_type_inner(arg, new_variables)); - if let Some(type_alias_type) = self.lookup_type_alias(path.clone()) { - let expected_generic_count = type_alias_type.generics.len(); - let type_alias_string = type_alias_type.to_string(); - let id = type_alias_type.id; + if let Some(type_alias) = self.lookup_type_alias(path.clone()) { + let type_alias = type_alias.borrow(); + let expected_generic_count = type_alias.generics.len(); + let type_alias_string = type_alias.to_string(); + let id = type_alias.id; self.verify_generics_count(expected_generic_count, &mut args, span, || { type_alias_string }); - let result = self.interner.get_type_alias(id).get_type(&args); + if let Some(item) = self.current_item { + self.interner.add_type_alias_dependency(item, id); + } // Collecting Type Alias references [Location]s to be used by LSP in order // to resolve the definition of the type alias @@ -578,9 +596,8 @@ impl<'a> Resolver<'a> { // equal to another type alias. Fixing this fully requires an analysis to create a DFG // of definition ordering, but for now we have an explicit check here so that we at // least issue an error that the type was not found instead of silently passing. 
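The comment above explains why globals are looked up before a fresh definition is created: each function gets its own resolver and scope, so every use of a global must share one definition id. A rough sketch of that get-or-insert pattern, using a simple `(module, name)` map rather than the interner's linear scan over `get_all_globals` (types and names are illustrative):

```rust
use std::collections::HashMap;

/// Hands out one definition id per (module, name) pair and reuses it afterwards.
#[derive(Default)]
struct GlobalInterner {
    ids: HashMap<(usize, String), usize>, // (module id, name) -> definition id
}

impl GlobalInterner {
    fn get_or_insert(&mut self, module: usize, name: &str) -> usize {
        let next = self.ids.len();
        *self.ids.entry((module, name.to_string())).or_insert(next)
    }
}

fn main() {
    let mut interner = GlobalInterner::default();
    let first = interner.get_or_insert(0, "N");
    let again = interner.get_or_insert(0, "N");
    let other = interner.get_or_insert(1, "N");
    assert_eq!(first, again); // same module + name reuses the definition
    assert_ne!(first, other); // a different module gets its own definition
}
```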
- if result != Type::Error { - return result; - } + let alias = self.interner.get_type_alias(id); + return Type::Alias(alias, args); } match self.lookup_struct_or_error(path) { @@ -591,6 +608,11 @@ impl<'a> Resolver<'a> { struct_type.borrow().to_string() }); + if let Some(current_item) = self.current_item { + let dependency_id = struct_type.borrow().id; + self.interner.add_type_dependency(current_item, dependency_id); + } + Type::Struct(struct_type, args) } None => Type::Error, @@ -643,7 +665,10 @@ impl<'a> Resolver<'a> { // If we cannot find a local generic of the same name, try to look up a global match self.path_resolver.resolve(self.def_maps, path.clone()) { Ok(ModuleDefId::GlobalId(id)) => { - Some(Type::Constant(self.eval_global_as_array_length(id))) + if let Some(current_item) = self.current_item { + self.interner.add_global_dependency(current_item, id); + } + Some(Type::Constant(self.eval_global_as_array_length(id, path))) } _ => None, } @@ -729,12 +754,15 @@ impl<'a> Resolver<'a> { resolved_type } - pub fn resolve_type_aliases( + pub fn resolve_type_alias( mut self, unresolved: NoirTypeAlias, + alias_id: TypeAliasId, ) -> (Type, Generics, Vec) { let generics = self.add_generics(&unresolved.generics); self.resolve_local_globals(); + + self.current_item = Some(DependencyId::Alias(alias_id)); let typ = self.resolve_type(unresolved.typ); (typ, generics, self.errors) @@ -822,23 +850,27 @@ impl<'a> Resolver<'a> { pub fn resolve_struct_fields( mut self, unresolved: NoirStruct, + struct_id: StructId, ) -> (Generics, Vec<(Ident, Type)>, Vec) { let generics = self.add_generics(&unresolved.generics); // Check whether the struct definition has globals in the local module and add them to the scope self.resolve_local_globals(); + self.current_item = Some(DependencyId::Struct(struct_id)); let fields = vecmap(unresolved.fields, |(ident, typ)| (ident, self.resolve_type(typ))); (generics, fields, self.errors) } fn resolve_local_globals(&mut self) { - for (stmt_id, global_info) in self.interner.get_all_globals() { - if global_info.local_id == self.path_resolver.local_module_id() { - let global_stmt = self.interner.let_statement(&stmt_id); - let definition = DefinitionKind::Global(global_stmt.expression); - self.add_global_variable_decl(global_info.ident, definition); + let globals = vecmap(self.interner.get_all_globals(), |global| { + (global.id, global.local_id, global.ident.clone()) + }); + for (id, local_module_id, name) in globals { + if local_module_id == self.path_resolver.local_module_id() { + let definition = DefinitionKind::Global(id); + self.add_global_variable_decl(name, definition); } } } @@ -900,6 +932,13 @@ impl<'a> Resolver<'a> { position: PubPosition::ReturnType, }); } + let is_low_level_function = + func.attributes().function.as_ref().map_or(false, |func| func.is_low_level()); + if !self.path_resolver.module_id().krate.is_stdlib() && is_low_level_function { + let error = + ResolverError::LowLevelFunctionOutsideOfStdlib { ident: func.name_ident().clone() }; + self.push_err(error); + } // 'pub' is required on return types for entry point functions if self.is_entry_point_function(func) @@ -908,6 +947,12 @@ impl<'a> Resolver<'a> { { self.push_err(ResolverError::NecessaryPub { ident: func.name_ident().clone() }); } + // '#[recursive]' attribute is only allowed for entry point functions + if !self.is_entry_point_function(func) && func.kind == FunctionKind::Recursive { + self.push_err(ResolverError::MisplacedRecursiveAttribute { + ident: func.name_ident().clone(), + }); + } if 
!self.distinct_allowed(func) && func.def.return_distinctness != Distinctness::DuplicationAllowed @@ -1080,6 +1125,17 @@ impl<'a> Resolver<'a> { } } } + Type::Alias(alias, generics) => { + for (i, generic) in generics.iter().enumerate() { + if let Type::NamedGeneric(type_variable, name) = generic { + if alias.borrow().generic_is_numeric(i) { + found.insert(name.to_string(), type_variable.clone()); + } + } else { + Self::find_numeric_generics_in_type(generic, found); + } + } + } Type::MutableReference(element) => Self::find_numeric_generics_in_type(element, found), Type::String(length) => { if let Type::NamedGeneric(type_variable, name) = length.as_ref() { @@ -1095,9 +1151,15 @@ impl<'a> Resolver<'a> { } } - pub fn resolve_global_let(&mut self, let_stmt: crate::LetStatement) -> HirStatement { + pub fn resolve_global_let( + &mut self, + let_stmt: crate::LetStatement, + global_id: GlobalId, + ) -> HirStatement { + self.current_item = Some(DependencyId::Global(global_id)); let expression = self.resolve_expression(let_stmt.expression); - let definition = DefinitionKind::Global(expression); + let global_id = self.interner.next_global_id(); + let definition = DefinitionKind::Global(global_id); HirStatement::Let(HirLetStatement { pattern: self.resolve_pattern(let_stmt.pattern, definition), @@ -1118,9 +1180,16 @@ impl<'a> Resolver<'a> { }) } StatementKind::Constrain(constrain_stmt) => { + let span = constrain_stmt.0.span; + let assert_msg_call_expr_id = + self.resolve_assert_message(constrain_stmt.1, span, constrain_stmt.0.clone()); let expr_id = self.resolve_expression(constrain_stmt.0); - let assert_message = constrain_stmt.1; - HirStatement::Constrain(HirConstrainStatement(expr_id, self.file, assert_message)) + + HirStatement::Constrain(HirConstrainStatement( + expr_id, + self.file, + assert_msg_call_expr_id, + )) } StatementKind::Expression(expr) => { HirStatement::Expression(self.resolve_expression(expr)) @@ -1169,6 +1238,44 @@ impl<'a> Resolver<'a> { } } + fn resolve_assert_message( + &mut self, + assert_message_expr: Option, + span: Span, + condition: Expression, + ) -> Option { + let assert_message_expr = assert_message_expr?; + + if matches!( + assert_message_expr, + Expression { kind: ExpressionKind::Literal(Literal::Str(..)), .. 
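The two attribute checks introduced just above reject `#[foreign]`/`#[builtin]` definitions outside the standard library and `#[recursive]` on anything other than an entry point. A minimal sketch of the same rules, with the function's relevant properties collapsed into booleans instead of being read from `FuncMeta` (names are illustrative):

```rust
#[derive(Debug, PartialEq)]
enum AttributeError {
    MisplacedRecursiveAttribute,
    LowLevelFunctionOutsideOfStdlib,
}

fn check_attributes(
    is_entry_point: bool,
    is_recursive: bool,
    is_low_level: bool,
    in_stdlib: bool,
) -> Vec<AttributeError> {
    let mut errors = Vec::new();
    // `#[foreign]` / `#[builtin]` functions may only be defined by the standard library.
    if is_low_level && !in_stdlib {
        errors.push(AttributeError::LowLevelFunctionOutsideOfStdlib);
    }
    // `#[recursive]` is only meaningful on a program's entry point.
    if is_recursive && !is_entry_point {
        errors.push(AttributeError::MisplacedRecursiveAttribute);
    }
    errors
}

fn main() {
    assert_eq!(
        check_attributes(false, true, false, false),
        vec![AttributeError::MisplacedRecursiveAttribute]
    );
    assert!(check_attributes(true, true, false, false).is_empty());
}
```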
} + ) { + return Some(self.resolve_expression(assert_message_expr)); + } + + let is_in_stdlib = self.path_resolver.module_id().krate.is_stdlib(); + let assert_msg_call_path = if is_in_stdlib { + ExpressionKind::Variable(Path { + segments: vec![Ident::from("resolve_assert_message")], + kind: PathKind::Crate, + span, + }) + } else { + ExpressionKind::Variable(Path { + segments: vec![Ident::from("std"), Ident::from("resolve_assert_message")], + kind: PathKind::Dep, + span, + }) + }; + let assert_msg_call_args = vec![assert_message_expr.clone(), condition]; + let assert_msg_call_expr = Expression::call( + Expression { kind: assert_msg_call_path, span }, + assert_msg_call_args, + span, + ); + Some(self.resolve_expression(assert_msg_call_expr)) + } + pub fn intern_stmt(&mut self, stmt: StatementKind) -> StmtId { let hir_stmt = self.resolve_stmt(stmt); self.interner.push_stmt(hir_stmt) @@ -1331,6 +1438,10 @@ impl<'a> Resolver<'a> { if hir_ident.id != DefinitionId::dummy_id() { match self.interner.definition(hir_ident.id).kind { DefinitionKind::Function(id) => { + if let Some(current_item) = self.current_item { + self.interner.add_function_dependency(current_item, id); + } + if self.interner.function_visibility(id) != FunctionVisibility::Public { @@ -1342,14 +1453,18 @@ impl<'a> Resolver<'a> { ); } } - DefinitionKind::Global(_) => {} + DefinitionKind::Global(global_id) => { + if let Some(current_item) = self.current_item { + self.interner.add_global_dependency(current_item, global_id); + } + } DefinitionKind::GenericType(_) => { // Initialize numeric generics to a polymorphic integer type in case // they're used in expressions. We must do this here since the type // checker does not check definition kinds and otherwise expects // parameters to already be typed. 
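`resolve_assert_message` above keeps plain string literals as-is and otherwise wraps the message in a call to the stdlib's `resolve_assert_message` helper, using a crate-relative path when resolving inside the stdlib itself and a `std::` path elsewhere. A very rough, string-based sketch of that decision (the real code builds HIR call expressions, not strings):

```rust
/// Chooses how an assert message is resolved: literals pass through unchanged,
/// anything else is wrapped in a call to the stdlib helper.
fn rewrite_assert_message(message: &str, is_string_literal: bool, in_stdlib: bool) -> String {
    if is_string_literal {
        return message.to_string();
    }
    let path = if in_stdlib {
        "crate::resolve_assert_message"
    } else {
        "std::resolve_assert_message"
    };
    format!("{path}({message}, <condition>)")
}

fn main() {
    assert_eq!(rewrite_assert_message("\"overflow\"", true, false), "\"overflow\"");
    assert_eq!(
        rewrite_assert_message("MyError { x }", false, false),
        "std::resolve_assert_message(MyError { x }, <condition>)"
    );
}
```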
- if self.interner.id_type(hir_ident.id) == Type::Error { - let typ = Type::polymorphic_integer(self.interner); + if self.interner.definition_type(hir_ident.id) == Type::Error { + let typ = Type::polymorphic_integer_or_field(self.interner); self.interner.push_definition_type(hir_ident.id, typ); } } @@ -1514,19 +1629,21 @@ impl<'a> Resolver<'a> { let id = self.add_variable_decl(name, mutable.is_some(), true, definition); HirPattern::Identifier(id) } - Pattern::Mutable(pattern, span) => { + Pattern::Mutable(pattern, span, _) => { if let Some(first_mut) = mutable { self.push_err(ResolverError::UnnecessaryMut { first_mut, second_mut: span }); } let pattern = self.resolve_pattern_mutable(*pattern, Some(span), definition); - HirPattern::Mutable(Box::new(pattern), span) + let location = Location::new(span, self.file); + HirPattern::Mutable(Box::new(pattern), location) } Pattern::Tuple(fields, span) => { let fields = vecmap(fields, |field| { self.resolve_pattern_mutable(field, mutable, definition.clone()) }); - HirPattern::Tuple(fields, span) + let location = Location::new(span, self.file); + HirPattern::Tuple(fields, location) } Pattern::Struct(name, fields, span) => { let error_identifier = |this: &mut Self| { @@ -1555,7 +1672,8 @@ impl<'a> Resolver<'a> { let fields = self.resolve_constructor_fields(typ, fields, span, resolve_field); let typ = Type::Struct(struct_type, generics); - HirPattern::Struct(typ, fields, span) + let location = Location::new(span, self.file); + HirPattern::Struct(typ, fields, location) } } } @@ -1635,8 +1753,8 @@ impl<'a> Resolver<'a> { } if let Some(global) = TryFromModuleDefId::try_from(id) { - let let_stmt = self.interner.let_statement(&global); - return Ok(let_stmt.ident().id); + let global = self.interner.get_global(global); + return Ok(global.definition_id); } let expected = "global variable".into(); @@ -1689,7 +1807,7 @@ impl<'a> Resolver<'a> { } } - fn lookup_type_alias(&mut self, path: Path) -> Option<&TypeAliasType> { + fn lookup_type_alias(&mut self, path: Path) -> Option> { self.lookup(path).ok().map(|id| self.interner.get_type_alias(id)) } @@ -1760,7 +1878,7 @@ impl<'a> Resolver<'a> { } for UnresolvedTraitConstraint { typ, trait_bound } in self.trait_bounds.clone() { - if let UnresolvedTypeData::Named(constraint_path, _) = &typ.typ { + if let UnresolvedTypeData::Named(constraint_path, _, _) = &typ.typ { // if `path` is `T::method_name`, we're looking for constraint of the form `T: SomeTrait` if constraint_path.segments.len() == 1 && path.segments[0] != constraint_path.last_segment() @@ -1818,10 +1936,11 @@ impl<'a> Resolver<'a> { self.interner.push_expr(hir_block) } - fn eval_global_as_array_length(&mut self, global: StmtId) -> u64 { - let stmt = match self.interner.statement(&global) { - HirStatement::Let(let_expr) => let_expr, - _ => return 0, + fn eval_global_as_array_length(&mut self, global: GlobalId, path: &Path) -> u64 { + let Some(stmt) = self.interner.get_global_let_statement(global) else { + let path = path.clone(); + self.push_err(ResolverError::NoSuchNumericTypeVariable { path }); + return 0; }; let length = stmt.expression; @@ -1921,7 +2040,7 @@ impl<'a> Resolver<'a> { self.push_err(ResolverError::InvalidTypeForEntryPoint { span }); } } - UnresolvedTypeData::Named(path, generics) => { + UnresolvedTypeData::Named(path, generics, _) => { // Since the type is named, we need to resolve it to see what it actually refers to // in order to check whether it is valid. 
Since resolving it may lead to a // resolution error, we have to truncate our error count to the previous count just diff --git a/compiler/noirc_frontend/src/hir/resolution/structs.rs b/compiler/noirc_frontend/src/hir/resolution/structs.rs index cf3e3436c88..ed7aa86e718 100644 --- a/compiler/noirc_frontend/src/hir/resolution/structs.rs +++ b/compiler/noirc_frontend/src/hir/resolution/structs.rs @@ -32,7 +32,8 @@ pub(crate) fn resolve_structs( // Each struct should already be present in the NodeInterner after def collection. for (type_id, typ) in structs { let file_id = typ.file_id; - let (generics, fields, resolver_errors) = resolve_struct_fields(context, crate_id, typ); + let (generics, fields, resolver_errors) = + resolve_struct_fields(context, crate_id, type_id, typ); errors.extend(vecmap(resolver_errors, |err| (err.into(), file_id))); context.def_interner.update_struct(type_id, |struct_def| { struct_def.set_fields(fields); @@ -67,6 +68,7 @@ pub(crate) fn resolve_structs( fn resolve_struct_fields( context: &mut Context, krate: CrateId, + type_id: StructId, unresolved: UnresolvedStruct, ) -> (Generics, Vec<(Ident, Type)>, Vec) { let path_resolver = @@ -74,7 +76,7 @@ fn resolve_struct_fields( let file_id = unresolved.file_id; let (generics, fields, errors) = Resolver::new(&mut context.def_interner, &path_resolver, &context.def_maps, file_id) - .resolve_struct_fields(unresolved.struct_def); + .resolve_struct_fields(unresolved.struct_def, type_id); (generics, fields, errors) } diff --git a/compiler/noirc_frontend/src/hir/resolution/type_aliases.rs b/compiler/noirc_frontend/src/hir/resolution/type_aliases.rs index f66f6c8dfa7..2e5ce611a7f 100644 --- a/compiler/noirc_frontend/src/hir/resolution/type_aliases.rs +++ b/compiler/noirc_frontend/src/hir/resolution/type_aliases.rs @@ -17,7 +17,7 @@ pub(crate) fn resolve_type_aliases( crate_id: CrateId, ) -> Vec<(CompilationError, FileId)> { let mut errors: Vec<(CompilationError, FileId)> = vec![]; - for (type_id, unresolved_typ) in type_aliases { + for (alias_id, unresolved_typ) in type_aliases { let path_resolver = StandardPathResolver::new(ModuleId { local_id: unresolved_typ.module_id, krate: crate_id, @@ -25,9 +25,9 @@ pub(crate) fn resolve_type_aliases( let file = unresolved_typ.file_id; let (typ, generics, resolver_errors) = Resolver::new(&mut context.def_interner, &path_resolver, &context.def_maps, file) - .resolve_type_aliases(unresolved_typ.type_alias_def); + .resolve_type_alias(unresolved_typ.type_alias_def, alias_id); errors.extend(resolver_errors.iter().cloned().map(|e| (e.into(), file))); - context.def_interner.set_type_alias(type_id, typ, generics); + context.def_interner.set_type_alias(alias_id, typ, generics); } errors } diff --git a/compiler/noirc_frontend/src/hir/type_check/errors.rs b/compiler/noirc_frontend/src/hir/type_check/errors.rs index 267dbd6b5be..96d30100d8b 100644 --- a/compiler/noirc_frontend/src/hir/type_check/errors.rs +++ b/compiler/noirc_frontend/src/hir/type_check/errors.rs @@ -8,6 +8,7 @@ use crate::hir_def::expr::HirBinaryOp; use crate::hir_def::types::Type; use crate::BinaryOpKind; use crate::FunctionReturnType; +use crate::IntegerBitSize; use crate::Signedness; #[derive(Error, Debug, Clone, PartialEq, Eq)] @@ -67,7 +68,7 @@ pub enum TypeCheckError { #[error("Integers must have the same signedness LHS is {sign_x:?}, RHS is {sign_y:?}")] IntegerSignedness { sign_x: Signedness, sign_y: Signedness, span: Span }, #[error("Integers must have the same bit width LHS is {bit_width_x}, RHS is {bit_width_y}")] - 
IntegerBitWidth { bit_width_x: u32, bit_width_y: u32, span: Span }, + IntegerBitWidth { bit_width_x: IntegerBitSize, bit_width_y: IntegerBitSize, span: Span }, #[error("{kind} cannot be used in an infix operation")] InvalidInfixOp { kind: &'static str, span: Span }, #[error("{kind} cannot be used in a unary operation")] @@ -115,6 +116,12 @@ pub enum TypeCheckError { NoMatchingImplFound { constraints: Vec<(Type, String)>, span: Span }, #[error("Constraint for `{typ}: {trait_name}` is not needed, another matching impl is already in scope")] UnneededTraitConstraint { trait_name: String, typ: Type, span: Span }, + #[error( + "Cannot pass a mutable reference from a constrained runtime to an unconstrained runtime" + )] + ConstrainedReferenceToUnconstrained { span: Span }, + #[error("Slices cannot be returned from an unconstrained runtime to a constrained runtime")] + UnconstrainedSliceReturnToConstrained { span: Span }, } impl TypeCheckError { @@ -202,7 +209,9 @@ impl From for Diagnostic { | TypeCheckError::AmbiguousBitWidth { span, .. } | TypeCheckError::IntegerAndFieldBinaryOperation { span } | TypeCheckError::OverflowingAssignment { span, .. } - | TypeCheckError::FieldModulo { span } => { + | TypeCheckError::FieldModulo { span } + | TypeCheckError::ConstrainedReferenceToUnconstrained { span } + | TypeCheckError::UnconstrainedSliceReturnToConstrained { span } => { Diagnostic::simple_error(error.to_string(), String::new(), span) } TypeCheckError::PublicReturnType { typ, span } => Diagnostic::simple_error( diff --git a/compiler/noirc_frontend/src/hir/type_check/expr.rs b/compiler/noirc_frontend/src/hir/type_check/expr.rs index 5885707a9b7..7b854e58fca 100644 --- a/compiler/noirc_frontend/src/hir/type_check/expr.rs +++ b/compiler/noirc_frontend/src/hir/type_check/expr.rs @@ -36,6 +36,18 @@ impl<'interner> TypeChecker<'interner> { } } + fn is_unconstrained_call(&self, expr: &ExprId) -> bool { + if let HirExpression::Ident(expr::HirIdent { id, .. }) = self.interner.expression(expr) { + if let Some(DefinitionKind::Function(func_id)) = + self.interner.try_definition(id).map(|def| &def.kind) + { + let modifiers = self.interner.function_modifiers(func_id); + return modifiers.is_unconstrained; + } + } + false + } + /// Infers a type for a given expression, and return this type. /// As a side-effect, this function will also remember this type in the NodeInterner /// for the given expr_id key. 
@@ -53,7 +65,7 @@ impl<'interner> TypeChecker<'interner> { let elem_types = vecmap(&arr, |arg| self.check_expression(arg)); let first_elem_type = elem_types - .get(0) + .first() .cloned() .unwrap_or_else(|| self.interner.next_type_variable()); @@ -92,7 +104,7 @@ impl<'interner> TypeChecker<'interner> { Type::Array(Box::new(length), Box::new(elem_type)) } HirLiteral::Bool(_) => Type::Bool, - HirLiteral::Integer(_, _) => Type::polymorphic_integer(self.interner), + HirLiteral::Integer(_, _) => Type::polymorphic_integer_or_field(self.interner), HirLiteral::Str(string) => { let len = Type::Constant(string.len() as u64); Type::String(Box::new(len)) @@ -139,6 +151,14 @@ impl<'interner> TypeChecker<'interner> { } HirExpression::Index(index_expr) => self.check_index_expression(expr_id, index_expr), HirExpression::Call(call_expr) => { + // Need to setup these flags here as `self` is borrowed mutably to type check the rest of the call expression + // These flags are later used to type check calls to unconstrained functions from constrained functions + let current_func = self.current_function; + let func_mod = current_func.map(|func| self.interner.function_modifiers(&func)); + let is_current_func_constrained = + func_mod.map_or(true, |func_mod| !func_mod.is_unconstrained); + let is_unconstrained_call = self.is_unconstrained_call(&call_expr.func); + self.check_if_deprecated(&call_expr.func); let function = self.check_expression(&call_expr.func); @@ -147,8 +167,34 @@ impl<'interner> TypeChecker<'interner> { let typ = self.check_expression(arg); (typ, *arg, self.interner.expr_span(arg)) }); + + // Check that we are not passing a mutable reference from a constrained runtime to an unconstrained runtime + if is_current_func_constrained && is_unconstrained_call { + for (typ, _, _) in args.iter() { + if matches!(&typ.follow_bindings(), Type::MutableReference(_)) { + self.errors.push(TypeCheckError::ConstrainedReferenceToUnconstrained { + span: self.interner.expr_span(expr_id), + }); + return Type::Error; + } + } + } + let span = self.interner.expr_span(expr_id); - self.bind_function_type(function, args, span) + let return_type = self.bind_function_type(function, args, span); + + // Check that we are not passing a slice from an unconstrained runtime to a constrained runtime + if is_current_func_constrained + && is_unconstrained_call + && return_type.contains_slice() + { + self.errors.push(TypeCheckError::UnconstrainedSliceReturnToConstrained { + span: self.interner.expr_span(expr_id), + }); + return Type::Error; + } + + return_type } HirExpression::MethodCall(mut method_call) => { let mut object_type = self.check_expression(&method_call.object).follow_bindings(); @@ -238,8 +284,9 @@ impl<'interner> TypeChecker<'interner> { Type::Tuple(vecmap(&elements, |elem| self.check_expression(elem))) } HirExpression::Lambda(lambda) => { - let captured_vars = - vecmap(lambda.captures, |capture| self.interner.id_type(capture.ident.id)); + let captured_vars = vecmap(lambda.captures, |capture| { + self.interner.definition_type(capture.ident.id) + }); let env_type: Type = if captured_vars.is_empty() { Type::Unit } else { Type::Tuple(captured_vars) }; @@ -262,7 +309,7 @@ impl<'interner> TypeChecker<'interner> { } }; - self.interner.push_expr_type(expr_id, typ.clone()); + self.interner.push_expr_type(*expr_id, typ.clone()); typ } @@ -279,7 +326,10 @@ impl<'interner> TypeChecker<'interner> { assert_eq!(the_trait.generics.len(), constraint.trait_generics.len()); for (param, arg) in 
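The call-expression changes above add two runtime-boundary checks: a constrained function may not pass a mutable reference into an unconstrained call, and an unconstrained call may not return a slice back into a constrained caller. A minimal sketch of both checks over a toy `Type` enum (the enum and helper names here are illustrative, not the frontend's):

```rust
/// A toy type representation, just enough to express the two checks.
#[allow(dead_code)]
enum Type {
    Field,
    MutableReference(Box<Type>),
    Slice(Box<Type>),
    Array(usize, Box<Type>),
    Tuple(Vec<Type>),
}

impl Type {
    fn contains_slice(&self) -> bool {
        match self {
            Type::Slice(_) => true,
            Type::Array(_, elem) | Type::MutableReference(elem) => elem.contains_slice(),
            Type::Tuple(fields) => fields.iter().any(Type::contains_slice),
            Type::Field => false,
        }
    }
}

#[derive(Debug, PartialEq)]
enum BoundaryError {
    ConstrainedReferenceToUnconstrained,
    UnconstrainedSliceReturnToConstrained,
}

/// Checks a call made from a constrained function to an unconstrained one.
fn check_unconstrained_call(args: &[Type], return_type: &Type) -> Result<(), BoundaryError> {
    // Mutable references may not cross into the unconstrained runtime.
    if args.iter().any(|arg| matches!(arg, Type::MutableReference(_))) {
        return Err(BoundaryError::ConstrainedReferenceToUnconstrained);
    }
    // Slices may not be returned back into the constrained runtime.
    if return_type.contains_slice() {
        return Err(BoundaryError::UnconstrainedSliceReturnToConstrained);
    }
    Ok(())
}

fn main() {
    let by_ref = [Type::MutableReference(Box::new(Type::Field))];
    assert_eq!(
        check_unconstrained_call(&by_ref, &Type::Field),
        Err(BoundaryError::ConstrainedReferenceToUnconstrained)
    );
    assert_eq!(
        check_unconstrained_call(&[], &Type::Slice(Box::new(Type::Field))),
        Err(BoundaryError::UnconstrainedSliceReturnToConstrained)
    );
    assert!(check_unconstrained_call(&[Type::Field], &Type::Field).is_ok());
}
```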
the_trait.generics.iter().zip(&constraint.trait_generics) { - bindings.insert(param.id(), (param.clone(), arg.clone())); + // Avoid binding t = t + if !arg.occurs(param.id()) { + bindings.insert(param.id(), (param.clone(), arg.clone())); + } } } @@ -413,7 +463,7 @@ impl<'interner> TypeChecker<'interner> { operator: UnaryOp::MutableReference, rhs: method_call.object, })); - self.interner.push_expr_type(&new_object, new_type); + self.interner.push_expr_type(new_object, new_type); self.interner.push_expr_location(new_object, location.span, location.file); new_object }); @@ -439,7 +489,7 @@ impl<'interner> TypeChecker<'interner> { operator: UnaryOp::Dereference { implicitly_added: true }, rhs: object, })); - self.interner.push_expr_type(&object, element.as_ref().clone()); + self.interner.push_expr_type(object, element.as_ref().clone()); self.interner.push_expr_location(object, location.span, location.file); // Recursively dereference to allow for converting &mut &mut T to T @@ -482,13 +532,15 @@ impl<'interner> TypeChecker<'interner> { let index_type = self.check_expression(&index_expr.index); let span = self.interner.expr_span(&index_expr.index); - index_type.unify(&Type::polymorphic_integer(self.interner), &mut self.errors, || { - TypeCheckError::TypeMismatch { + index_type.unify( + &Type::polymorphic_integer_or_field(self.interner), + &mut self.errors, + || TypeCheckError::TypeMismatch { expected_typ: "an integer".to_owned(), expr_typ: index_type.to_string(), expr_span: span, - } - }); + }, + ); // When writing `a[i]`, if `a : &mut ...` then automatically dereference `a` as many // times as needed to get the underlying array. @@ -519,6 +571,7 @@ impl<'interner> TypeChecker<'interner> { Type::Integer(..) | Type::FieldElement | Type::TypeVariable(_, TypeVariableKind::IntegerOrField) + | Type::TypeVariable(_, TypeVariableKind::Integer) | Type::Bool => (), Type::TypeVariable(_, _) => { @@ -635,8 +688,8 @@ impl<'interner> TypeChecker<'interner> { operator: crate::UnaryOp::Dereference { implicitly_added: true }, rhs: old_lhs, })); - this.interner.push_expr_type(&old_lhs, lhs_type); - this.interner.push_expr_type(access_lhs, element); + this.interner.push_expr_type(old_lhs, lhs_type); + this.interner.push_expr_type(*access_lhs, element); let old_location = this.interner.id_location(old_lhs); this.interner.push_expr_location(*access_lhs, span, old_location.file); @@ -759,37 +812,17 @@ impl<'interner> TypeChecker<'interner> { // Matches on TypeVariable must be first to follow any type // bindings. 
- (TypeVariable(int, _), other) | (other, TypeVariable(int, _)) => { - if let TypeBinding::Bound(binding) = &*int.borrow() { + (TypeVariable(var, _), other) | (other, TypeVariable(var, _)) => { + if let TypeBinding::Bound(binding) = &*var.borrow() { return self.comparator_operand_type_rules(other, binding, op, span); } - if !op.kind.is_valid_for_field_type() && (other.is_bindable() || other.is_field()) { - let other = other.follow_bindings(); - - self.push_delayed_type_check(Box::new(move || { - if other.is_field() || other.is_bindable() { - Err(TypeCheckError::InvalidComparisonOnField { span }) - } else { - Ok(()) - } - })); - } - - let mut bindings = TypeBindings::new(); - if other.try_bind_to_polymorphic_int(int, &mut bindings).is_ok() - || other == &Type::Error - { - Type::apply_type_bindings(bindings); - Ok((Bool, false)) - } else { - Err(TypeCheckError::TypeMismatchWithSource { - expected: lhs_type.clone(), - actual: rhs_type.clone(), - span, - source: Source::Binary, - }) - } + self.bind_type_variables_for_infix(lhs_type, op, rhs_type, span); + Ok((Bool, false)) + } + (Alias(alias, args), other) | (other, Alias(alias, args)) => { + let alias = alias.borrow().get_type(args); + self.comparator_operand_type_rules(&alias, other, op, span) } (Integer(sign_x, bit_width_x), Integer(sign_y, bit_width_y)) => { if sign_x != sign_y { @@ -1013,6 +1046,38 @@ impl<'interner> TypeChecker<'interner> { } } + fn bind_type_variables_for_infix( + &mut self, + lhs_type: &Type, + op: &HirBinaryOp, + rhs_type: &Type, + span: Span, + ) { + self.unify(lhs_type, rhs_type, || TypeCheckError::TypeMismatchWithSource { + expected: lhs_type.clone(), + actual: rhs_type.clone(), + source: Source::Binary, + span, + }); + + // In addition to unifying both types, we also have to bind either + // the lhs or rhs to an integer type variable. This ensures if both lhs + // and rhs are type variables, that they will have the correct integer + // type variable kind instead of TypeVariableKind::Normal. + let target = if op.kind.is_valid_for_field_type() { + Type::polymorphic_integer_or_field(self.interner) + } else { + Type::polymorphic_integer(self.interner) + }; + + self.unify(lhs_type, &target, || TypeCheckError::TypeMismatchWithSource { + expected: lhs_type.clone(), + actual: rhs_type.clone(), + source: Source::Binary, + span, + }); + } + // Given a binary operator and another type. This method will produce the output type // and a boolean indicating whether to use the trait impl corresponding to the operator // or not. A value of false indicates the caller to use a primitive operation for this @@ -1039,48 +1104,15 @@ impl<'interner> TypeChecker<'interner> { if let TypeBinding::Bound(binding) = &*int.borrow() { return self.infix_operand_type_rules(binding, op, other, span); } - if (op.is_modulo() || op.is_bitwise()) && (other.is_bindable() || other.is_field()) - { - let other = other.follow_bindings(); - let kind = op.kind; - // This will be an error if these types later resolve to a Field, or stay - // polymorphic as the bit size will be unknown. Delay this error until the function - // finishes resolving so we can still allow cases like `let x: u8 = 1 << 2;`. 
- self.push_delayed_type_check(Box::new(move || { - if other.is_field() { - if kind == BinaryOpKind::Modulo { - Err(TypeCheckError::FieldModulo { span }) - } else { - Err(TypeCheckError::InvalidBitwiseOperationOnField { span }) - } - } else if other.is_bindable() { - Err(TypeCheckError::AmbiguousBitWidth { span }) - } else if kind.is_bit_shift() && other.is_signed() { - Err(TypeCheckError::TypeCannotBeUsed { - typ: other, - place: "bit shift", - span, - }) - } else { - Ok(()) - } - })); - } - let mut bindings = TypeBindings::new(); - if other.try_bind_to_polymorphic_int(int, &mut bindings).is_ok() - || other == &Type::Error - { - Type::apply_type_bindings(bindings); - Ok((other.clone(), false)) - } else { - Err(TypeCheckError::TypeMismatchWithSource { - expected: lhs_type.clone(), - actual: rhs_type.clone(), - source: Source::Binary, - span, - }) - } + self.bind_type_variables_for_infix(lhs_type, op, rhs_type, span); + + // Both types are unified so the choice of which to return is arbitrary + Ok((other.clone(), false)) + } + (Alias(alias, args), other) | (other, Alias(alias, args)) => { + let alias = alias.borrow().get_type(args); + self.infix_operand_type_rules(&alias, op, other, span) } (Integer(sign_x, bit_width_x), Integer(sign_y, bit_width_y)) => { if sign_x != sign_y { @@ -1101,11 +1133,12 @@ impl<'interner> TypeChecker<'interner> { } // The result of two Fields is always a witness (FieldElement, FieldElement) => { - if op.is_bitwise() { - return Err(TypeCheckError::InvalidBitwiseOperationOnField { span }); - } - if op.is_modulo() { - return Err(TypeCheckError::FieldModulo { span }); + if !op.kind.is_valid_for_field_type() { + if op.kind == BinaryOpKind::Modulo { + return Err(TypeCheckError::FieldModulo { span }); + } else { + return Err(TypeCheckError::InvalidBitwiseOperationOnField { span }); + } } Ok((FieldElement, false)) } @@ -1145,7 +1178,7 @@ impl<'interner> TypeChecker<'interner> { self.errors .push(TypeCheckError::InvalidUnaryOp { kind: rhs_type.to_string(), span }); } - let expected = Type::polymorphic_integer(self.interner); + let expected = Type::polymorphic_integer_or_field(self.interner); rhs_type.unify(&expected, &mut self.errors, || TypeCheckError::InvalidUnaryOp { kind: rhs_type.to_string(), span, diff --git a/compiler/noirc_frontend/src/hir/type_check/mod.rs b/compiler/noirc_frontend/src/hir/type_check/mod.rs index 3c2a970ee84..21d1c75a0f2 100644 --- a/compiler/noirc_frontend/src/hir/type_check/mod.rs +++ b/compiler/noirc_frontend/src/hir/type_check/mod.rs @@ -15,16 +15,13 @@ pub use errors::TypeCheckError; use crate::{ hir_def::{expr::HirExpression, stmt::HirStatement, traits::TraitConstraint}, - node_interner::{ExprId, FuncId, NodeInterner, StmtId}, + node_interner::{ExprId, FuncId, GlobalId, NodeInterner}, Type, }; use self::errors::Source; -type TypeCheckFn = Box Result<(), TypeCheckError>>; - pub struct TypeChecker<'interner> { - delayed_type_checks: Vec, interner: &'interner mut NodeInterner, errors: Vec, current_function: Option, @@ -80,15 +77,7 @@ pub fn type_check_func(interner: &mut NodeInterner, func_id: FuncId) -> Vec (noirc_e impl<'interner> TypeChecker<'interner> { fn new(interner: &'interner mut NodeInterner) -> Self { - Self { - delayed_type_checks: Vec::new(), - interner, - errors: Vec::new(), - trait_constraints: Vec::new(), - current_function: None, - } - } - - pub fn push_delayed_type_check(&mut self, f: TypeCheckFn) { - self.delayed_type_checks.push(f); + Self { interner, errors: Vec::new(), trait_constraints: Vec::new(), current_function: None 
} } - fn check_function_body(&mut self, body: &ExprId) -> (Type, Vec) { - let body_type = self.check_expression(body); - (body_type, std::mem::take(&mut self.delayed_type_checks)) + fn check_function_body(&mut self, body: &ExprId) -> Type { + self.check_expression(body) } - pub fn check_global(id: &StmtId, interner: &'interner mut NodeInterner) -> Vec { + pub fn check_global( + id: GlobalId, + interner: &'interner mut NodeInterner, + ) -> Vec { let mut this = Self { - delayed_type_checks: Vec::new(), interner, errors: Vec::new(), trait_constraints: Vec::new(), current_function: None, }; - this.check_statement(id); + let statement = this.interner.get_global(id).let_statement; + this.check_statement(&statement); this.errors } @@ -454,7 +435,7 @@ mod test { } fn local_module_id(&self) -> LocalModuleId { - LocalModuleId(arena::Index::from_raw_parts(0, 0)) + LocalModuleId(arena::Index::unsafe_zeroed()) } fn module_id(&self) -> ModuleId { @@ -505,7 +486,7 @@ mod test { let mut def_maps = BTreeMap::new(); let file = FileId::default(); - let mut modules = arena::Arena::new(); + let mut modules = arena::Arena::default(); let location = Location::new(Default::default(), file); modules.insert(ModuleData::new(None, location, false)); diff --git a/compiler/noirc_frontend/src/hir/type_check/stmt.rs b/compiler/noirc_frontend/src/hir/type_check/stmt.rs index fd8ae62d34e..358bea86922 100644 --- a/compiler/noirc_frontend/src/hir/type_check/stmt.rs +++ b/compiler/noirc_frontend/src/hir/type_check/stmt.rs @@ -8,6 +8,7 @@ use crate::hir_def::stmt::{ }; use crate::hir_def::types::Type; use crate::node_interner::{DefinitionId, ExprId, StmtId}; +use crate::UnaryOp; use super::errors::{Source, TypeCheckError}; use super::TypeChecker; @@ -72,13 +73,10 @@ impl<'interner> TypeChecker<'interner> { let expected_type = Type::polymorphic_integer(self.interner); - self.unify(&start_range_type, &expected_type, || { - TypeCheckError::TypeCannotBeUsed { - typ: start_range_type.clone(), - place: "for loop", - span: range_span, - } - .add_context("The range of a loop must be known at compile-time") + self.unify(&start_range_type, &expected_type, || TypeCheckError::TypeCannotBeUsed { + typ: start_range_type.clone(), + place: "for loop", + span: range_span, }); self.interner.push_definition_type(for_loop.identifier.id, start_range_type); @@ -92,7 +90,7 @@ impl<'interner> TypeChecker<'interner> { match pattern { HirPattern::Identifier(ident) => self.interner.push_definition_type(ident.id, typ), HirPattern::Mutable(pattern, _) => self.bind_pattern(pattern, typ), - HirPattern::Tuple(fields, span) => match typ { + HirPattern::Tuple(fields, location) => match typ.follow_bindings() { Type::Tuple(field_types) if field_types.len() == fields.len() => { for (field, field_type) in fields.iter().zip(field_types) { self.bind_pattern(field, field_type); @@ -106,25 +104,25 @@ impl<'interner> TypeChecker<'interner> { self.errors.push(TypeCheckError::TypeMismatchWithSource { expected, actual: other, - span: *span, + span: location.span, source: Source::Assignment, }); } }, - HirPattern::Struct(struct_type, fields, span) => { + HirPattern::Struct(struct_type, fields, location) => { self.unify(struct_type, &typ, || TypeCheckError::TypeMismatchWithSource { expected: struct_type.clone(), actual: typ.clone(), - span: *span, + span: location.span, source: Source::Assignment, }); - if let Type::Struct(struct_type, generics) = struct_type { + if let Type::Struct(struct_type, generics) = struct_type.follow_bindings() { let struct_type = 
struct_type.borrow(); for (field_name, field_pattern) in fields { if let Some((type_field, _)) = - struct_type.get_field(&field_name.0.contents, generics) + struct_type.get_field(&field_name.0.contents, &generics) { self.bind_pattern(field_pattern, type_field); } @@ -191,7 +189,7 @@ impl<'interner> TypeChecker<'interner> { mutable = definition.mutable; } - let typ = self.interner.id_type(ident.id).instantiate(self.interner).0; + let typ = self.interner.definition_type(ident.id).instantiate(self.interner).0; typ.follow_bindings() }; @@ -234,7 +232,7 @@ impl<'interner> TypeChecker<'interner> { let expr_span = self.interner.expr_span(index); index_type.unify( - &Type::polymorphic_integer(self.interner), + &Type::polymorphic_integer_or_field(self.interner), &mut self.errors, || TypeCheckError::TypeMismatch { expected_typ: "an integer".to_owned(), @@ -303,6 +301,9 @@ impl<'interner> TypeChecker<'interner> { let expr_type = self.check_expression(&stmt.0); let expr_span = self.interner.expr_span(&stmt.0); + // Must type check the assertion message expression so that we instantiate bindings + stmt.2.map(|assert_msg_expr| self.check_expression(&assert_msg_expr)); + self.unify(&expr_type, &Type::Bool, || TypeCheckError::TypeMismatch { expr_typ: expr_type.to_string(), expected_typ: Type::Bool.to_string(), @@ -347,6 +348,7 @@ impl<'interner> TypeChecker<'interner> { HirExpression::Literal(HirLiteral::Integer(value, false)) => { let v = value.to_u128(); if let Type::Integer(_, bit_count) = annotated_type { + let bit_count: u32 = (*bit_count).into(); let max = 1 << bit_count; if v >= max { self.errors.push(TypeCheckError::OverflowingAssignment { @@ -358,9 +360,15 @@ impl<'interner> TypeChecker<'interner> { }; }; } - HirExpression::Prefix(_) => self - .errors - .push(TypeCheckError::InvalidUnaryOp { kind: annotated_type.to_string(), span }), + HirExpression::Prefix(expr) => { + self.lint_overflowing_uint(&expr.rhs, annotated_type); + if matches!(expr.operator, UnaryOp::Minus) { + self.errors.push(TypeCheckError::InvalidUnaryOp { + kind: "annotated_type".to_string(), + span, + }); + } + } HirExpression::Infix(expr) => { self.lint_overflowing_uint(&expr.lhs, annotated_type); self.lint_overflowing_uint(&expr.rhs, annotated_type); diff --git a/compiler/noirc_frontend/src/hir_def/expr.rs b/compiler/noirc_frontend/src/hir_def/expr.rs index 75ed68af0f6..b4c590de491 100644 --- a/compiler/noirc_frontend/src/hir_def/expr.rs +++ b/compiler/noirc_frontend/src/hir_def/expr.rs @@ -94,19 +94,6 @@ impl HirBinaryOp { let location = Location::new(op.span(), file); HirBinaryOp { location, kind } } - - pub fn is_bitwise(&self) -> bool { - use BinaryOpKind::*; - matches!(self.kind, And | Or | Xor | ShiftRight | ShiftLeft) - } - - pub fn is_bit_shift(&self) -> bool { - self.kind.is_bit_shift() - } - - pub fn is_modulo(&self) -> bool { - self.kind.is_modulo() - } } #[derive(Debug, Clone)] diff --git a/compiler/noirc_frontend/src/hir_def/function.rs b/compiler/noirc_frontend/src/hir_def/function.rs index 9fff301f5f7..d3ab2a9393b 100644 --- a/compiler/noirc_frontend/src/hir_def/function.rs +++ b/compiler/noirc_frontend/src/hir_def/function.rs @@ -43,12 +43,7 @@ pub struct Parameters(pub Vec); impl Parameters { pub fn span(&self) -> Span { assert!(!self.is_empty()); - let mut spans = vecmap(&self.0, |param| match ¶m.0 { - HirPattern::Identifier(ident) => ident.location.span, - HirPattern::Mutable(_, span) => *span, - HirPattern::Tuple(_, span) => *span, - HirPattern::Struct(_, _, span) => *span, - }); + let mut spans = 
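The overflow lint above converts the annotated `IntegerBitSize` to a `u32` and flags literals that are at least `2^bit_count`. A small sketch of that bound check, assuming the bit count stays below 128 as it does for the frontend's supported integer widths:

```rust
/// True if `value` cannot be represented in an unsigned integer of `bit_count` bits.
fn overflows(value: u128, bit_count: u32) -> bool {
    let max = 1u128 << bit_count; // assumes bit_count < 128
    value >= max
}

fn main() {
    assert!(overflows(256, 8)); // 256 does not fit in a u8
    assert!(!overflows(255, 8)); // 255 does
    assert!(!overflows(u64::MAX as u128, 64));
}
```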
vecmap(&self.0, |param| param.0.span()); let merged_span = spans.pop().unwrap(); for span in spans { @@ -127,7 +122,7 @@ impl FuncMeta { pub fn can_ignore_return_type(&self) -> bool { match self.kind { FunctionKind::LowLevel | FunctionKind::Builtin | FunctionKind::Oracle => true, - FunctionKind::Normal => false, + FunctionKind::Normal | FunctionKind::Recursive => false, } } diff --git a/compiler/noirc_frontend/src/hir_def/stmt.rs b/compiler/noirc_frontend/src/hir_def/stmt.rs index 34c9302c251..b910be1fdda 100644 --- a/compiler/noirc_frontend/src/hir_def/stmt.rs +++ b/compiler/noirc_frontend/src/hir_def/stmt.rs @@ -2,7 +2,7 @@ use super::expr::HirIdent; use crate::node_interner::ExprId; use crate::{Ident, Type}; use fm::FileId; -use noirc_errors::Span; +use noirc_errors::{Location, Span}; /// A HirStatement is the result of performing name resolution on /// the Statement AST node. Unlike the AST node, any nested nodes @@ -55,14 +55,14 @@ pub struct HirAssignStatement { /// originates from. This is used later in the SSA pass to issue /// an error if a constrain is found to be always false. #[derive(Debug, Clone)] -pub struct HirConstrainStatement(pub ExprId, pub FileId, pub Option); +pub struct HirConstrainStatement(pub ExprId, pub FileId, pub Option); #[derive(Debug, Clone, Hash)] pub enum HirPattern { Identifier(HirIdent), - Mutable(Box, Span), - Tuple(Vec, Span), - Struct(Type, Vec<(Ident, HirPattern)>, Span), + Mutable(Box, Location), + Tuple(Vec, Location), + Struct(Type, Vec<(Ident, HirPattern)>, Location), } impl HirPattern { @@ -92,9 +92,9 @@ impl HirPattern { pub fn span(&self) -> Span { match self { HirPattern::Identifier(ident) => ident.location.span, - HirPattern::Mutable(_, span) - | HirPattern::Tuple(_, span) - | HirPattern::Struct(_, _, span) => *span, + HirPattern::Mutable(_, location) + | HirPattern::Tuple(_, location) + | HirPattern::Struct(_, _, location) => location.span, } } } diff --git a/compiler/noirc_frontend/src/hir_def/types.rs b/compiler/noirc_frontend/src/hir_def/types.rs index 0ba4cb2da65..b70aa43701c 100644 --- a/compiler/noirc_frontend/src/hir_def/types.rs +++ b/compiler/noirc_frontend/src/hir_def/types.rs @@ -8,6 +8,7 @@ use std::{ use crate::{ hir::type_check::TypeCheckError, node_interner::{ExprId, NodeInterner, TraitId, TypeAliasId}, + IntegerBitSize, }; use iter_extended::vecmap; use noirc_errors::{Location, Span}; @@ -27,8 +28,8 @@ pub enum Type { Array(Box, Box), /// A primitive integer type with the given sign and bit count. - /// E.g. `u32` would be `Integer(Unsigned, 32)` - Integer(Signedness, u32), + /// E.g. `u32` would be `Integer(Unsigned, ThirtyTwo)` + Integer(Signedness, IntegerBitSize), /// The primitive `bool` type. Bool, @@ -44,13 +45,18 @@ pub enum Type { /// The unit type `()`. Unit, + /// A tuple type with the given list of fields in the order they appear in source code. + Tuple(Vec), + /// A user-defined struct type. The `Shared` field here refers to /// the shared definition for each instance of this struct type. The `Vec` /// represents the generic arguments (if any) to this struct type. Struct(Shared, Vec), - /// A tuple type with the given list of fields in the order they appear in source code. - Tuple(Vec), + /// A user-defined alias to another type. Similar to a Struct, this carries a shared + /// reference to the definition of the alias along with any generics that may have + /// been applied to the alias. + Alias(Shared, Vec), /// TypeVariables are stand-in variables for some type which is not yet known. 
/// They are not to be confused with NamedGenerics. While the later mostly works @@ -116,11 +122,16 @@ impl Type { let typ = typ.as_ref(); (length as u32) * typ.field_count() } - Type::Struct(ref def, args) => { + Type::Struct(def, args) => { let struct_type = def.borrow(); let fields = struct_type.get_fields(args); fields.iter().fold(0, |acc, (_, field_type)| acc + field_type.field_count()) } + Type::Alias(def, _) => { + // It is safe to access `typ` without instantiating generics here since generics + // cannot change the number of fields in `typ`. + def.borrow().typ.field_count() + } Type::Tuple(fields) => { fields.iter().fold(0, |acc, field_typ| acc + field_typ.field_count()) } @@ -157,7 +168,7 @@ impl Type { } } - fn contains_slice(&self) -> bool { + pub(crate) fn contains_slice(&self) -> bool { match self { Type::Array(size, _) => matches!(size.as_ref(), Type::NotConstant), Type::Struct(struct_typ, generics) => { @@ -309,7 +320,7 @@ impl std::fmt::Display for StructType { /// Wrap around an unsolved type #[derive(Debug, Clone, Eq)] -pub struct TypeAliasType { +pub struct TypeAlias { pub name: Ident, pub id: TypeAliasId, pub typ: Type, @@ -317,40 +328,33 @@ pub struct TypeAliasType { pub location: Location, } -impl std::hash::Hash for TypeAliasType { +impl std::hash::Hash for TypeAlias { fn hash(&self, state: &mut H) { self.id.hash(state); } } -impl PartialEq for TypeAliasType { +impl PartialEq for TypeAlias { fn eq(&self, other: &Self) -> bool { self.id == other.id } } -impl std::fmt::Display for TypeAliasType { +impl std::fmt::Display for TypeAlias { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.name)?; - - if !self.generics.is_empty() { - let generics = vecmap(&self.generics, |binding| binding.borrow().to_string()); - write!(f, "{}", generics.join(", "))?; - } - - Ok(()) + write!(f, "{}", self.name) } } -impl TypeAliasType { +impl TypeAlias { pub fn new( id: TypeAliasId, name: Ident, location: Location, typ: Type, generics: Generics, - ) -> TypeAliasType { - TypeAliasType { id, typ, name, location, generics } + ) -> TypeAlias { + TypeAlias { id, typ, name, location, generics } } pub fn set_type_and_generics(&mut self, new_typ: Type, new_generics: Generics) { @@ -371,6 +375,14 @@ impl TypeAliasType { self.typ.substitute(&substitutions) } + + /// True if the given index is the same index as a generic type of this alias + /// which is expected to be a numeric generic. + /// This is needed because we infer type kinds in Noir and don't have extensive kind checking. + pub fn generic_is_numeric(&self, index_of_generic: usize) -> bool { + let target_id = self.generics[index_of_generic].0; + self.typ.contains_numeric_typevar(target_id) + } } /// A shared, mutable reference to some T. @@ -441,6 +453,10 @@ pub enum TypeVariableKind { /// type annotations on each integer literal. IntegerOrField, + /// A generic integer type. This is a more specific kind of TypeVariable + /// that can only be bound to Type::Integer, or other polymorphic integers. + Integer, + /// A potentially constant array size. This will only bind to itself, Type::NotConstant, or /// Type::Constant(n) with a matching size. This defaults to Type::Constant(n) if still unbound /// during monomorphization. 
@@ -538,7 +554,7 @@ impl Type { } pub fn default_range_loop_type() -> Type { - Type::Integer(Signedness::Unsigned, 64) + Type::Integer(Signedness::Unsigned, IntegerBitSize::SixtyFour) } pub fn type_variable(id: TypeVariableId) -> Type { @@ -555,13 +571,20 @@ impl Type { Type::TypeVariable(var, kind) } - pub fn polymorphic_integer(interner: &mut NodeInterner) -> Type { + pub fn polymorphic_integer_or_field(interner: &mut NodeInterner) -> Type { let id = interner.next_type_variable_id(); let kind = TypeVariableKind::IntegerOrField; let var = TypeVariable::unbound(id); Type::TypeVariable(var, kind) } + pub fn polymorphic_integer(interner: &mut NodeInterner) -> Type { + let id = interner.next_type_variable_id(); + let kind = TypeVariableKind::Integer; + let var = TypeVariable::unbound(id); + Type::TypeVariable(var, kind) + } + /// A bit of an awkward name for this function - this function returns /// true for type variables or polymorphic integers which are unbound. /// NamedGenerics will always be false as although they are bindable, @@ -637,6 +660,13 @@ impl Type { } }) } + Type::Alias(alias, generics) => generics.iter().enumerate().any(|(i, generic)| { + if named_generic_id_matches_target(generic) { + alias.borrow().generic_is_numeric(i) + } else { + generic.contains_numeric_typevar(target_id) + } + }), Type::MutableReference(element) => element.contains_numeric_typevar(target_id), Type::String(length) => named_generic_id_matches_target(length), Type::FmtString(length, elements) => { @@ -673,6 +703,11 @@ impl Type { | Type::TraitAsType(..) | Type::NotConstant => false, + // This function is called during name resolution before we've verified aliases + // are not cyclic. As a result, it wouldn't be safe to check this alias' definition + // to see if the aliased type is valid. + Type::Alias(..) 
=> false, + Type::Array(length, element) => { length.is_valid_for_program_input() && element.is_valid_for_program_input() } @@ -746,6 +781,13 @@ impl std::fmt::Display for Type { Signedness::Unsigned => write!(f, "u{num_bits}"), }, Type::TypeVariable(var, TypeVariableKind::Normal) => write!(f, "{}", var.borrow()), + Type::TypeVariable(binding, TypeVariableKind::Integer) => { + if let TypeBinding::Unbound(_) = &*binding.borrow() { + write!(f, "{}", TypeVariableKind::Integer.default_type()) + } else { + write!(f, "{}", binding.borrow()) + } + } Type::TypeVariable(binding, TypeVariableKind::IntegerOrField) => { if let TypeBinding::Unbound(_) = &*binding.borrow() { // Show a Field by default if this TypeVariableKind::IntegerOrField is unbound, since that is @@ -772,6 +814,14 @@ impl std::fmt::Display for Type { write!(f, "{}<{}>", s.borrow(), args.join(", ")) } } + Type::Alias(alias, args) => { + let args = vecmap(args, |arg| arg.to_string()); + if args.is_empty() { + write!(f, "{}", alias.borrow()) + } else { + write!(f, "{}<{}>", alias.borrow(), args.join(", ")) + } + } Type::TraitAsType(_id, name, generics) => { write!(f, "impl {}", name)?; if !generics.is_empty() { @@ -863,7 +913,7 @@ impl Type { TypeBinding::Unbound(id) => *id, }; - let this = self.substitute(bindings); + let this = self.substitute(bindings).follow_bindings(); match &this { Type::Constant(length) if *length == target_length => { @@ -910,6 +960,7 @@ impl Type { Ok(()) } TypeVariableKind::IntegerOrField => Err(UnificationError), + TypeVariableKind::Integer => Err(UnificationError), }, } } @@ -920,27 +971,54 @@ impl Type { /// Try to bind a PolymorphicInt variable to self, succeeding if self is an integer, field, /// other PolymorphicInt type, or type variable. If successful, the binding is placed in the /// given TypeBindings map rather than linked immediately. - pub fn try_bind_to_polymorphic_int( + fn try_bind_to_polymorphic_int( &self, var: &TypeVariable, bindings: &mut TypeBindings, + only_integer: bool, ) -> Result<(), UnificationError> { let target_id = match &*var.borrow() { TypeBinding::Bound(_) => unreachable!(), TypeBinding::Unbound(id) => *id, }; - let this = self.substitute(bindings); - + let this = self.substitute(bindings).follow_bindings(); match &this { - Type::FieldElement | Type::Integer(..) => { + Type::Integer(..) => { + bindings.insert(target_id, (var.clone(), this)); + Ok(()) + } + Type::FieldElement if !only_integer => { bindings.insert(target_id, (var.clone(), this)); Ok(()) } Type::TypeVariable(self_var, TypeVariableKind::IntegerOrField) => { let borrow = self_var.borrow(); match &*borrow { - TypeBinding::Bound(typ) => typ.try_bind_to_polymorphic_int(var, bindings), + TypeBinding::Bound(typ) => { + typ.try_bind_to_polymorphic_int(var, bindings, only_integer) + } + // Avoid infinitely recursive bindings + TypeBinding::Unbound(id) if *id == target_id => Ok(()), + TypeBinding::Unbound(new_target_id) => { + if only_integer { + // Integer is more specific than IntegerOrField so we bind the type + // variable to Integer instead. 
+ let clone = Type::TypeVariable(var.clone(), TypeVariableKind::Integer); + bindings.insert(*new_target_id, (self_var.clone(), clone)); + } else { + bindings.insert(target_id, (var.clone(), this.clone())); + } + Ok(()) + } + } + } + Type::TypeVariable(self_var, TypeVariableKind::Integer) => { + let borrow = self_var.borrow(); + match &*borrow { + TypeBinding::Bound(typ) => { + typ.try_bind_to_polymorphic_int(var, bindings, only_integer) + } // Avoid infinitely recursive bindings TypeBinding::Unbound(id) if *id == target_id => Ok(()), TypeBinding::Unbound(_) => { @@ -949,18 +1027,23 @@ impl Type { } } } - Type::TypeVariable(binding, TypeVariableKind::Normal) => { - let borrow = binding.borrow(); + Type::TypeVariable(self_var, TypeVariableKind::Normal) => { + let borrow = self_var.borrow(); match &*borrow { - TypeBinding::Bound(typ) => typ.try_bind_to_polymorphic_int(var, bindings), + TypeBinding::Bound(typ) => { + typ.try_bind_to_polymorphic_int(var, bindings, only_integer) + } // Avoid infinitely recursive bindings TypeBinding::Unbound(id) if *id == target_id => Ok(()), TypeBinding::Unbound(new_target_id) => { - // IntegerOrField is more specific than TypeVariable so we bind the type - // variable to IntegerOrField instead. - let clone = - Type::TypeVariable(var.clone(), TypeVariableKind::IntegerOrField); - bindings.insert(*new_target_id, (binding.clone(), clone)); + // Bind to the most specific type variable kind + let clone_kind = if only_integer { + TypeVariableKind::Integer + } else { + TypeVariableKind::IntegerOrField + }; + let clone = Type::TypeVariable(var.clone(), clone_kind); + bindings.insert(*new_target_id, (self_var.clone(), clone)); Ok(()) } } @@ -1047,10 +1130,24 @@ impl Type { match (self, other) { (Error, _) | (_, Error) => Ok(()), + (Alias(alias, args), other) | (other, Alias(alias, args)) => { + let alias = alias.borrow().get_type(args); + alias.try_unify(other, bindings) + } + (TypeVariable(var, Kind::IntegerOrField), other) | (other, TypeVariable(var, Kind::IntegerOrField)) => { other.try_unify_to_type_variable(var, bindings, |bindings| { - other.try_bind_to_polymorphic_int(var, bindings) + let only_integer = false; + other.try_bind_to_polymorphic_int(var, bindings, only_integer) + }) + } + + (TypeVariable(var, Kind::Integer), other) + | (other, TypeVariable(var, Kind::Integer)) => { + other.try_unify_to_type_variable(var, bindings, |bindings| { + let only_integer = true; + other.try_bind_to_polymorphic_int(var, bindings, only_integer) }) } @@ -1408,7 +1505,7 @@ impl Type { Type::NamedGeneric(binding, _) | Type::TypeVariable(binding, _) => { substitute_binding(binding) } - // Do not substitute_helper fields, it ca, substitute_bound_typevarsn lead to infinite recursion + // Do not substitute_helper fields, it can lead to infinite recursion // and we should not match fields when type checking anyway. 
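Stepping back from the hunks above: when two still-unbound variables meet in `try_bind_to_polymorphic_int`, the code keeps the more constrained kind (`Integer` over `IntegerOrField` over a plain type variable). A minimal self-contained sketch of that "bind to the most specific kind" rule, using an invented ordering rather than the compiler's types:

```rust
// Declaration order gives Normal < IntegerOrField < Integer, so `max` picks the
// stricter (more specific) kind when two unbound variables are unified.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Specificity {
    Normal,
    IntegerOrField,
    Integer,
}

fn merged_kind(a: Specificity, b: Specificity) -> Specificity {
    a.max(b)
}

fn main() {
    assert_eq!(merged_kind(Specificity::Normal, Specificity::Integer), Specificity::Integer);
    assert_eq!(
        merged_kind(Specificity::IntegerOrField, Specificity::Integer),
        Specificity::Integer
    );
}
```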
Type::Struct(fields, args) => { let args = vecmap(args, |arg| { @@ -1416,6 +1513,12 @@ impl Type { }); Type::Struct(fields.clone(), args) } + Type::Alias(alias, args) => { + let args = vecmap(args, |arg| { + arg.substitute_helper(type_bindings, substitute_bound_typevars) + }); + Type::Alias(alias.clone(), args) + } Type::Tuple(fields) => { let fields = vecmap(fields, |field| { field.substitute_helper(type_bindings, substitute_bound_typevars) @@ -1455,7 +1558,7 @@ impl Type { } /// True if the given TypeVariableId is free anywhere within self - fn occurs(&self, target_id: TypeVariableId) -> bool { + pub fn occurs(&self, target_id: TypeVariableId) -> bool { match self { Type::Array(len, elem) => len.occurs(target_id) || elem.occurs(target_id), Type::String(len) => len.occurs(target_id), @@ -1464,7 +1567,9 @@ impl Type { let field_occurs = fields.occurs(target_id); len_occurs || field_occurs } - Type::Struct(_, generic_args) => generic_args.iter().any(|arg| arg.occurs(target_id)), + Type::Struct(_, generic_args) | Type::Alias(_, generic_args) => { + generic_args.iter().any(|arg| arg.occurs(target_id)) + } Type::Tuple(fields) => fields.iter().any(|field| field.occurs(target_id)), Type::NamedGeneric(binding, _) | Type::TypeVariable(binding, _) => { match &*binding.borrow() { @@ -1515,6 +1620,11 @@ impl Type { let args = vecmap(args, |arg| arg.follow_bindings()); Struct(def.clone(), args) } + Alias(def, args) => { + // We don't need to vecmap(args, follow_bindings) since we're recursively + // calling follow_bindings here already. + def.borrow().get_type(args).follow_bindings() + } Tuple(args) => Tuple(vecmap(args, |arg| arg.follow_bindings())), TypeVariable(var, _) | NamedGeneric(var, _) => { if let TypeBinding::Bound(typ) = &*var.borrow() { @@ -1573,11 +1683,10 @@ fn convert_array_expression_to_slice( interner.push_expr_location(call, location.span, location.file); interner.push_expr_location(func, location.span, location.file); - interner.push_expr_type(&call, target_type.clone()); - interner.push_expr_type( - &func, - Type::Function(vec![array_type], Box::new(target_type), Box::new(Type::Unit)), - ); + interner.push_expr_type(call, target_type.clone()); + + let func_type = Type::Function(vec![array_type], Box::new(target_type), Box::new(Type::Unit)); + interner.push_expr_type(func, func_type); } impl BinaryTypeOperator { @@ -1599,6 +1708,7 @@ impl TypeVariableKind { pub(crate) fn default_type(&self) -> Type { match self { TypeVariableKind::IntegerOrField | TypeVariableKind::Normal => Type::default_int_type(), + TypeVariableKind::Integer => Type::default_range_loop_type(), TypeVariableKind::Constant(length) => Type::Constant(*length), } } @@ -1622,8 +1732,14 @@ impl From<&Type> for PrintableType { PrintableType::Array { length, typ: Box::new(typ.into()) } } Type::Integer(sign, bit_width) => match sign { - Signedness::Unsigned => PrintableType::UnsignedInteger { width: *bit_width }, - Signedness::Signed => PrintableType::SignedInteger { width: *bit_width }, + Signedness::Unsigned => { + PrintableType::UnsignedInteger { width: (*bit_width).into() } + } + Signedness::Signed => PrintableType::SignedInteger { width: (*bit_width).into() }, + }, + Type::TypeVariable(binding, TypeVariableKind::Integer) => match &*binding.borrow() { + TypeBinding::Bound(typ) => typ.into(), + TypeBinding::Unbound(_) => Type::default_range_loop_type().into(), }, Type::TypeVariable(binding, TypeVariableKind::IntegerOrField) => { match &*binding.borrow() { @@ -1646,6 +1762,7 @@ impl From<&Type> for PrintableType { let 
fields = vecmap(fields, |(name, typ)| (name, typ.into())); PrintableType::Struct { fields, name: struct_type.name.to_string() } } + Type::Alias(alias, args) => alias.borrow().get_type(args).into(), Type::TraitAsType(_, _, _) => unreachable!(), Type::Tuple(types) => PrintableType::Tuple { types: vecmap(types, |typ| typ.into()) }, Type::TypeVariable(_, _) => unreachable!(), @@ -1683,15 +1800,26 @@ impl std::fmt::Debug for Type { Type::TypeVariable(binding, TypeVariableKind::IntegerOrField) => { write!(f, "IntOrField{:?}", binding) } + Type::TypeVariable(binding, TypeVariableKind::Integer) => { + write!(f, "Int{:?}", binding) + } Type::TypeVariable(binding, TypeVariableKind::Constant(n)) => { write!(f, "{}{:?}", n, binding) } Type::Struct(s, args) => { let args = vecmap(args, |arg| format!("{:?}", arg)); if args.is_empty() { - write!(f, "{:?}", s.borrow()) + write!(f, "{}", s.borrow()) + } else { + write!(f, "{}<{}>", s.borrow(), args.join(", ")) + } + } + Type::Alias(alias, args) => { + let args = vecmap(args, |arg| format!("{:?}", arg)); + if args.is_empty() { + write!(f, "{}", alias.borrow()) } else { - write!(f, "{:?}<{}>", s.borrow(), args.join(", ")) + write!(f, "{}<{}>", alias.borrow(), args.join(", ")) } } Type::TraitAsType(_id, name, generics) => { diff --git a/compiler/noirc_frontend/src/lexer/errors.rs b/compiler/noirc_frontend/src/lexer/errors.rs index a2a4056f1d0..35a07c11e0a 100644 --- a/compiler/noirc_frontend/src/lexer/errors.rs +++ b/compiler/noirc_frontend/src/lexer/errors.rs @@ -17,8 +17,6 @@ pub enum LexerErrorKind { InvalidIntegerLiteral { span: Span, found: String }, #[error("{:?} is not a valid attribute", found)] MalformedFuncAttribute { span: Span, found: String }, - #[error("Integer type is larger than the maximum supported size of u127")] - TooManyBits { span: Span, max: u32, got: u32 }, #[error("Logical and used instead of bitwise and")] LogicalAnd { span: Span }, #[error("Unterminated block comment")] @@ -45,7 +43,6 @@ impl LexerErrorKind { LexerErrorKind::NotADoubleChar { span, .. } => *span, LexerErrorKind::InvalidIntegerLiteral { span, .. } => *span, LexerErrorKind::MalformedFuncAttribute { span, .. } => *span, - LexerErrorKind::TooManyBits { span, .. 
} => *span, LexerErrorKind::LogicalAnd { span } => *span, LexerErrorKind::UnterminatedBlockComment { span } => *span, LexerErrorKind::UnterminatedStringLiteral { span } => *span, @@ -85,13 +82,6 @@ impl LexerErrorKind { format!(" {found} is not a valid attribute"), *span, ), - LexerErrorKind::TooManyBits { span, max, got } => ( - "Integer literal too large".to_string(), - format!( - "The maximum number of bits needed to represent a field is {max}, This integer type needs {got} bits" - ), - *span, - ), LexerErrorKind::LogicalAnd { span } => ( "Noir has no logical-and (&&) operator since short-circuiting is much less efficient when compiling to circuits".to_string(), "Try `&` instead, or use `if` only if you require short-circuiting".to_string(), diff --git a/compiler/noirc_frontend/src/lexer/lexer.rs b/compiler/noirc_frontend/src/lexer/lexer.rs index fd8168e36c6..cf66ece0c30 100644 --- a/compiler/noirc_frontend/src/lexer/lexer.rs +++ b/compiler/noirc_frontend/src/lexer/lexer.rs @@ -307,7 +307,7 @@ impl<'a> Lexer<'a> { // Check if word an int type // if no error occurred, then it is either a valid integer type or it is not an int type - let parsed_token = IntType::lookup_int_type(&word, Span::inclusive(start, end))?; + let parsed_token = IntType::lookup_int_type(&word)?; // Check if it is an int type if let Some(int_type_token) = parsed_token { diff --git a/compiler/noirc_frontend/src/lexer/token.rs b/compiler/noirc_frontend/src/lexer/token.rs index ab131ccd880..5674ae5a39a 100644 --- a/compiler/noirc_frontend/src/lexer/token.rs +++ b/compiler/noirc_frontend/src/lexer/token.rs @@ -306,7 +306,7 @@ impl IntType { // XXX: Result // Is not the best API. We could split this into two functions. One that checks if the the // word is a integer, which only returns an Option - pub(crate) fn lookup_int_type(word: &str, span: Span) -> Result, LexerErrorKind> { + pub(crate) fn lookup_int_type(word: &str) -> Result, LexerErrorKind> { // Check if the first string is a 'u' or 'i' let is_signed = if word.starts_with('i') { @@ -324,12 +324,6 @@ impl IntType { Err(_) => return Ok(None), }; - let max_bits = FieldElement::max_num_bits() / 2; - - if str_as_u32 > max_bits { - return Err(LexerErrorKind::TooManyBits { span, max: max_bits, got: str_as_u32 }); - } - if is_signed { Ok(Some(Token::IntType(IntType::Signed(str_as_u32)))) } else { @@ -491,6 +485,7 @@ impl Attribute { Attribute::Function(FunctionAttribute::Oracle(name.to_string())) } ["test"] => Attribute::Function(FunctionAttribute::Test(TestScope::None)), + ["recursive"] => Attribute::Function(FunctionAttribute::Recursive), ["test", name] => { validate(name)?; let malformed_scope = @@ -541,6 +536,7 @@ pub enum FunctionAttribute { Builtin(String), Oracle(String), Test(TestScope), + Recursive, } impl FunctionAttribute { @@ -562,6 +558,10 @@ impl FunctionAttribute { matches!(self, FunctionAttribute::Foreign(_)) } + pub fn is_oracle(&self) -> bool { + matches!(self, FunctionAttribute::Oracle(_)) + } + pub fn is_low_level(&self) -> bool { matches!(self, FunctionAttribute::Foreign(_) | FunctionAttribute::Builtin(_)) } @@ -574,6 +574,7 @@ impl fmt::Display for FunctionAttribute { FunctionAttribute::Foreign(ref k) => write!(f, "#[foreign({k})]"), FunctionAttribute::Builtin(ref k) => write!(f, "#[builtin({k})]"), FunctionAttribute::Oracle(ref k) => write!(f, "#[oracle({k})]"), + FunctionAttribute::Recursive => write!(f, "#[recursive]"), } } } @@ -617,6 +618,7 @@ impl AsRef for FunctionAttribute { FunctionAttribute::Builtin(string) => string, 
FunctionAttribute::Oracle(string) => string, FunctionAttribute::Test { .. } => "", + FunctionAttribute::Recursive => "", } } } @@ -671,6 +673,7 @@ pub enum Keyword { Struct, Trait, Type, + Unchecked, Unconstrained, Use, Where, @@ -713,6 +716,7 @@ impl fmt::Display for Keyword { Keyword::Struct => write!(f, "struct"), Keyword::Trait => write!(f, "trait"), Keyword::Type => write!(f, "type"), + Keyword::Unchecked => write!(f, "unchecked"), Keyword::Unconstrained => write!(f, "unconstrained"), Keyword::Use => write!(f, "use"), Keyword::Where => write!(f, "where"), @@ -758,6 +762,7 @@ impl Keyword { "struct" => Keyword::Struct, "trait" => Keyword::Trait, "type" => Keyword::Type, + "unchecked" => Keyword::Unchecked, "unconstrained" => Keyword::Unconstrained, "use" => Keyword::Use, "where" => Keyword::Where, @@ -772,6 +777,27 @@ impl Keyword { } } +pub struct Tokens(pub Vec); + +type TokenMapIter = Map, fn(SpannedToken) -> (Token, Span)>; + +impl<'a> From for chumsky::Stream<'a, Token, Span, TokenMapIter> { + fn from(tokens: Tokens) -> Self { + let end_of_input = match tokens.0.last() { + Some(spanned_token) => spanned_token.to_span(), + None => Span::single_char(0), + }; + + fn get_span(token: SpannedToken) -> (Token, Span) { + let span = token.to_span(); + (token.into_token(), span) + } + + let iter = tokens.0.into_iter().map(get_span as fn(_) -> _); + chumsky::Stream::from_iter(end_of_input, iter) + } +} + #[cfg(test)] mod keywords { use strum::IntoEnumIterator; @@ -794,24 +820,3 @@ mod keywords { } } } - -pub struct Tokens(pub Vec); - -type TokenMapIter = Map, fn(SpannedToken) -> (Token, Span)>; - -impl<'a> From for chumsky::Stream<'a, Token, Span, TokenMapIter> { - fn from(tokens: Tokens) -> Self { - let end_of_input = match tokens.0.last() { - Some(spanned_token) => spanned_token.to_span(), - None => Span::single_char(0), - }; - - fn get_span(token: SpannedToken) -> (Token, Span) { - let span = token.to_span(); - (token.into_token(), span) - } - - let iter = tokens.0.into_iter().map(get_span as fn(_) -> _); - chumsky::Stream::from_iter(end_of_input, iter) - } -} diff --git a/compiler/noirc_frontend/src/lib.rs b/compiler/noirc_frontend/src/lib.rs index 9582b80dcba..be007929fc4 100644 --- a/compiler/noirc_frontend/src/lib.rs +++ b/compiler/noirc_frontend/src/lib.rs @@ -11,6 +11,7 @@ #![warn(clippy::semicolon_if_nothing_returned)] pub mod ast; +pub mod debug; pub mod graph; pub mod lexer; pub mod monomorphization; @@ -44,11 +45,12 @@ pub mod macros_api { pub use noirc_errors::Span; pub use crate::graph::CrateId; + use crate::hir::def_collector::dc_crate::{UnresolvedFunctions, UnresolvedTraitImpl}; pub use crate::hir::def_collector::errors::MacroError; pub use crate::hir_def::expr::{HirExpression, HirLiteral}; pub use crate::hir_def::stmt::HirStatement; pub use crate::node_interner::{NodeInterner, StructId}; - pub use crate::parser::SortedModule; + pub use crate::parser::{parse_program, SortedModule}; pub use crate::token::SecondaryAttribute; pub use crate::hir::def_map::ModuleDefId; @@ -73,8 +75,22 @@ pub mod macros_api { crate_id: &CrateId, context: &HirContext, ) -> Result; + + // TODO(#4653): generalize this function + fn process_unresolved_traits_impls( + &self, + _crate_id: &CrateId, + _context: &mut HirContext, + _unresolved_traits_impls: &[UnresolvedTraitImpl], + _collected_functions: &mut Vec, + ) -> Result<(), (MacroError, FileId)>; + /// Function to manipulate the AST after type checking has been completed. /// The AST after type checking has been done is called the HIR. 
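The macro-processor hook whose signature changes just below now returns a `Result` so that a failing macro surfaces as a diagnostic tied to a file rather than aborting compilation silently. A rough, self-contained model of that shape (all names here are invented, not the actual `MacroProcessor` trait):

```rust
// Toy plugin interface: post-type-check processing can fail with an error plus the
// file it belongs to, and the driver collects failures instead of panicking.
#[derive(Debug)]
struct ToyMacroError {
    message: String,
}

type ToyFileId = usize;

trait ToyMacroProcessor {
    fn process_typed_ast(&self, crate_name: &str) -> Result<(), (ToyMacroError, ToyFileId)>;
}

struct NoopProcessor;

impl ToyMacroProcessor for NoopProcessor {
    fn process_typed_ast(&self, _crate_name: &str) -> Result<(), (ToyMacroError, ToyFileId)> {
        Ok(())
    }
}

fn run_macros(processors: &[&dyn ToyMacroProcessor]) -> Vec<(ToyMacroError, ToyFileId)> {
    processors
        .iter()
        .filter_map(|p| p.process_typed_ast("my_crate").err())
        .collect()
}

fn main() {
    assert!(run_macros(&[&NoopProcessor]).is_empty());
}
```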
- fn process_typed_ast(&self, crate_id: &CrateId, context: &mut HirContext); + fn process_typed_ast( + &self, + crate_id: &CrateId, + context: &mut HirContext, + ) -> Result<(), (MacroError, FileId)>; } } diff --git a/compiler/noirc_frontend/src/monomorphization/ast.rs b/compiler/noirc_frontend/src/monomorphization/ast.rs index 42a618e7d77..e4e619d5d92 100644 --- a/compiler/noirc_frontend/src/monomorphization/ast.rs +++ b/compiler/noirc_frontend/src/monomorphization/ast.rs @@ -1,9 +1,13 @@ use acvm::FieldElement; use iter_extended::vecmap; -use noirc_errors::Location; +use noirc_errors::{ + debug_info::{DebugTypes, DebugVariables}, + Location, +}; use crate::{ - hir_def::function::FunctionSignature, BinaryOpKind, Distinctness, Signedness, Visibility, + hir_def::function::FunctionSignature, BinaryOpKind, Distinctness, IntegerBitSize, Signedness, + Visibility, }; /// The monomorphized AST is expression-based, all statements are also @@ -31,7 +35,7 @@ pub enum Expression { ExtractTupleField(Box, usize), Call(Call), Let(Let), - Constrain(Box, Location, Option), + Constrain(Box, Location, Option>), Assign(Assign), Semi(Box), } @@ -214,8 +218,8 @@ pub struct Function { #[derive(Debug, PartialEq, Eq, Clone, Hash)] pub enum Type { Field, - Array(/*len:*/ u64, Box), // Array(4, Field) = [Field; 4] - Integer(Signedness, /*bits:*/ u32), // u32 = Integer(unsigned, 32) + Array(/*len:*/ u64, Box), // Array(4, Field) = [Field; 4] + Integer(Signedness, /*bits:*/ IntegerBitSize), // u32 = Integer(unsigned, ThirtyTwo) Bool, String(/*len:*/ u64), // String(4) = str[4] FmtString(/*len:*/ u64, Box), @@ -246,15 +250,23 @@ pub struct Program { pub return_distinctness: Distinctness, pub return_location: Option, pub return_visibility: Visibility, + /// Indicates to a backend whether a SNARK-friendly prover should be used. + pub recursive: bool, + pub debug_variables: DebugVariables, + pub debug_types: DebugTypes, } impl Program { + #[allow(clippy::too_many_arguments)] pub fn new( functions: Vec, main_function_signature: FunctionSignature, return_distinctness: Distinctness, return_location: Option, return_visibility: Visibility, + recursive: bool, + debug_variables: DebugVariables, + debug_types: DebugTypes, ) -> Program { Program { functions, @@ -262,6 +274,9 @@ impl Program { return_distinctness, return_location, return_visibility, + recursive, + debug_variables, + debug_types, } } diff --git a/compiler/noirc_frontend/src/monomorphization/debug.rs b/compiler/noirc_frontend/src/monomorphization/debug.rs new file mode 100644 index 00000000000..a8ff4399f99 --- /dev/null +++ b/compiler/noirc_frontend/src/monomorphization/debug.rs @@ -0,0 +1,202 @@ +use iter_extended::vecmap; +use noirc_errors::debug_info::DebugVarId; +use noirc_errors::Location; +use noirc_printable_type::PrintableType; + +use crate::debug::{SourceFieldId, SourceVarId}; +use crate::hir_def::expr::*; +use crate::node_interner::ExprId; + +use super::ast::{Expression, Ident}; +use super::{MonomorphizationError, Monomorphizer}; + +const DEBUG_MEMBER_ASSIGN_PREFIX: &str = "__debug_member_assign_"; +const DEBUG_VAR_ID_ARG_SLOT: usize = 0; +const DEBUG_VALUE_ARG_SLOT: usize = 1; +const DEBUG_MEMBER_FIELD_INDEX_ARG_SLOT: usize = 2; + +impl From for SourceVarId { + fn from(var_id: u128) -> Self { + Self(var_id as u32) + } +} + +impl From for SourceFieldId { + fn from(field_id: u128) -> Self { + Self(field_id as u32) + } +} + +impl<'interner> Monomorphizer<'interner> { + /// Patch instrumentation calls inserted for debugging. 
This will record + /// tracked variables and their types, and assign them an ID to use at + /// runtime. This ID is different from the source ID assigned at + /// instrumentation time because at that point we haven't fully resolved the + /// types for generic functions. So a single generic function may be + /// instantiated multiple times with its tracked variables being of + /// different types for each instance at runtime. + pub(super) fn patch_debug_instrumentation_call( + &mut self, + call: &HirCallExpression, + arguments: &mut [Expression], + ) -> Result<(), MonomorphizationError> { + let original_func = Box::new(self.expr(call.func)?); + if let Expression::Ident(Ident { name, .. }) = original_func.as_ref() { + if name == "__debug_var_assign" { + self.patch_debug_var_assign(call, arguments)?; + } else if name == "__debug_var_drop" { + self.patch_debug_var_drop(call, arguments)?; + } else if let Some(arity) = name.strip_prefix(DEBUG_MEMBER_ASSIGN_PREFIX) { + let arity = arity.parse::().expect("failed to parse member assign arity"); + self.patch_debug_member_assign(call, arguments, arity)?; + } + } + Ok(()) + } + + /// Update instrumentation code inserted on variable assignment. We need to + /// register the variable instance, its type and replace the source_var_id + /// with the ID of the registration. Multiple registrations of the same + /// variable are possible if using generic functions, hence the temporary ID + /// created when injecting the instrumentation code can map to multiple IDs + /// at runtime. + fn patch_debug_var_assign( + &mut self, + call: &HirCallExpression, + arguments: &mut [Expression], + ) -> Result<(), MonomorphizationError> { + let hir_arguments = vecmap(&call.arguments, |id| self.interner.expression(id)); + let var_id_arg = hir_arguments.get(DEBUG_VAR_ID_ARG_SLOT); + let Some(HirExpression::Literal(HirLiteral::Integer(source_var_id, _))) = var_id_arg else { + unreachable!("Missing source_var_id in __debug_var_assign call"); + }; + + // instantiate tracked variable for the value type and associate it with + // the ID used by the injected instrumentation code + let var_type = self.interner.id_type(call.arguments[DEBUG_VALUE_ARG_SLOT]); + let source_var_id = source_var_id.to_u128().into(); + // then update the ID used for tracking at runtime + let var_id = self.debug_type_tracker.insert_var(source_var_id, var_type); + let interned_var_id = self.intern_var_id(var_id, &call.location); + arguments[DEBUG_VAR_ID_ARG_SLOT] = self.expr(interned_var_id)?; + Ok(()) + } + + /// Update instrumentation code for a variable being dropped out of scope. + /// Given the source_var_id we search for the last assigned debug var_id and + /// replace it instead. + fn patch_debug_var_drop( + &mut self, + call: &HirCallExpression, + arguments: &mut [Expression], + ) -> Result<(), MonomorphizationError> { + let hir_arguments = vecmap(&call.arguments, |id| self.interner.expression(id)); + let var_id_arg = hir_arguments.get(DEBUG_VAR_ID_ARG_SLOT); + let Some(HirExpression::Literal(HirLiteral::Integer(source_var_id, _))) = var_id_arg else { + unreachable!("Missing source_var_id in __debug_var_drop call"); + }; + // update variable ID for tracked drops (ie. 
when the var goes out of scope) + let source_var_id = source_var_id.to_u128().into(); + let var_id = self + .debug_type_tracker + .get_var_id(source_var_id) + .unwrap_or_else(|| unreachable!("failed to find debug variable")); + let interned_var_id = self.intern_var_id(var_id, &call.location); + arguments[DEBUG_VAR_ID_ARG_SLOT] = self.expr(interned_var_id)?; + Ok(()) + } + + /// Update instrumentation code inserted when assigning to a member of an + /// existing variable. Same as above for replacing the source_var_id, but also + /// we need to resolve the path and the type of the member being assigned. + /// For this last part, we need to resolve the mapping from field names in + /// structs to positions in the runtime tuple, since all structs are + /// replaced by tuples during compilation. + fn patch_debug_member_assign( + &mut self, + call: &HirCallExpression, + arguments: &mut [Expression], + arity: usize, + ) -> Result<(), MonomorphizationError> { + let hir_arguments = vecmap(&call.arguments, |id| self.interner.expression(id)); + let var_id_arg = hir_arguments.get(DEBUG_VAR_ID_ARG_SLOT); + let Some(HirExpression::Literal(HirLiteral::Integer(source_var_id, _))) = var_id_arg else { + unreachable!("Missing source_var_id in __debug_member_assign call"); + }; + // update variable member assignments + let source_var_id = source_var_id.to_u128().into(); + + let var_type = self + .debug_type_tracker + .get_type(source_var_id) + .unwrap_or_else(|| panic!("type not found for {source_var_id:?}")) + .clone(); + let mut cursor_type = &var_type; + for i in 0..arity { + if let Some(HirExpression::Literal(HirLiteral::Integer(fe_i, i_neg))) = + hir_arguments.get(DEBUG_MEMBER_FIELD_INDEX_ARG_SLOT + i) + { + let index = fe_i.to_i128().unsigned_abs(); + if *i_neg { + // We use negative indices at instrumentation time to indicate + // and reference member accesses by name which cannot be + // resolved until we have a type. This strategy is also used + // for tuple member access because syntactically they are + // the same as named field accesses. 
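The comment above describes how a by-name member access recorded at instrumentation time (encoded as a "negative index") is only resolved to a tuple position once the struct's layout is known. A standalone sketch of that resolution step, under the assumption that struct fields become tuple slots in declaration order (helper names are illustrative only):

```rust
// Resolve a field name to its position in the flattened tuple representation.
fn resolve_member(fields: &[(&str, &str)], member: &str) -> Option<usize> {
    fields.iter().position(|(name, _)| *name == member)
}

fn main() {
    // e.g. `struct Point { x: Field, y: Field }` flattened to (Field, Field)
    let point_fields = [("x", "Field"), ("y", "Field")];
    assert_eq!(resolve_member(&point_fields, "y"), Some(1));
}
```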
+ let field_index = self + .debug_type_tracker + .resolve_field_index(index.into(), cursor_type) + .unwrap_or_else(|| { + unreachable!("failed to resolve {i}-th member indirection on type {cursor_type:?}") + }); + + cursor_type = element_type_at_index(cursor_type, field_index); + let index_id = self.interner.push_expr(HirExpression::Literal( + HirLiteral::Integer(field_index.into(), false), + )); + self.interner.push_expr_type(index_id, crate::Type::FieldElement); + self.interner.push_expr_location( + index_id, + call.location.span, + call.location.file, + ); + arguments[DEBUG_MEMBER_FIELD_INDEX_ARG_SLOT + i] = self.expr(index_id)?; + } else { + // array/string element using constant index + cursor_type = element_type_at_index(cursor_type, index as usize); + } + } else { + // array element using non-constant index + cursor_type = element_type_at_index(cursor_type, 0); + } + } + + let var_id = self + .debug_type_tracker + .get_var_id(source_var_id) + .unwrap_or_else(|| unreachable!("failed to find debug variable")); + let interned_var_id = self.intern_var_id(var_id, &call.location); + arguments[DEBUG_VAR_ID_ARG_SLOT] = self.expr(interned_var_id)?; + Ok(()) + } + + fn intern_var_id(&mut self, var_id: DebugVarId, location: &Location) -> ExprId { + let var_id_literal = HirLiteral::Integer((var_id.0 as u128).into(), false); + let expr_id = self.interner.push_expr(HirExpression::Literal(var_id_literal)); + self.interner.push_expr_type(expr_id, crate::Type::FieldElement); + self.interner.push_expr_location(expr_id, location.span, location.file); + expr_id + } +} + +fn element_type_at_index(ptype: &PrintableType, i: usize) -> &PrintableType { + match ptype { + PrintableType::Array { length: _length, typ } => typ.as_ref(), + PrintableType::Tuple { types } => &types[i], + PrintableType::Struct { name: _name, fields } => &fields[i].1, + PrintableType::String { length: _length } => &PrintableType::UnsignedInteger { width: 8 }, + _ => { + panic!["expected type with sub-fields, found terminal type"] + } + } +} diff --git a/compiler/noirc_frontend/src/monomorphization/debug_types.rs b/compiler/noirc_frontend/src/monomorphization/debug_types.rs new file mode 100644 index 00000000000..fea073d394f --- /dev/null +++ b/compiler/noirc_frontend/src/monomorphization/debug_types.rs @@ -0,0 +1,137 @@ +use crate::{ + debug::{DebugInstrumenter, SourceFieldId, SourceVarId}, + hir_def::types::Type, +}; +use noirc_errors::debug_info::{ + DebugTypeId, DebugTypes, DebugVarId, DebugVariable, DebugVariables, +}; +use noirc_printable_type::PrintableType; +use std::collections::HashMap; + +/// We keep a collection of the debug variables and their types in this +/// structure. The source_var_id refers to the ID given by the debug +/// instrumenter. This variable does not have a type yet and hence it +/// can be instantiated for multiple types if it's in the context of a generic +/// variable. The var_id refers to the ID of the instantiated variable which +/// will have a valid type. +#[derive(Debug, Clone, Default)] +pub struct DebugTypeTracker { + // Variable names collected during instrumentation injection + source_variables: HashMap, + + // Field names used for member access collected during instrumentation injection + source_field_names: HashMap, + + // Current instances of tracked variables from the ID given during + // instrumentation. The tracked var_id will change for each source_var_id + // when compiling generic functions. 
+ source_to_debug_vars: HashMap, + + // All instances of tracked variables + variables: HashMap, + + // Types of tracked variables + types: HashMap, + types_reverse: HashMap, + + next_var_id: u32, + next_type_id: u32, +} + +impl DebugTypeTracker { + pub fn build_from_debug_instrumenter(instrumenter: &DebugInstrumenter) -> Self { + DebugTypeTracker { + source_variables: instrumenter.variables.clone(), + source_field_names: instrumenter.field_names.clone(), + ..DebugTypeTracker::default() + } + } + + pub fn extract_vars_and_types(&self) -> (DebugVariables, DebugTypes) { + let debug_variables = self + .variables + .clone() + .into_iter() + .map(|(var_id, (source_var_id, type_id))| { + ( + var_id, + DebugVariable { + name: self.source_variables.get(&source_var_id).cloned().unwrap_or_else( + || { + unreachable!( + "failed to retrieve variable name for {source_var_id:?}" + ); + }, + ), + debug_type_id: type_id, + }, + ) + }) + .collect(); + let debug_types = self.types.clone().into_iter().collect(); + + (debug_variables, debug_types) + } + + pub fn resolve_field_index( + &self, + field_id: SourceFieldId, + cursor_type: &PrintableType, + ) -> Option { + self.source_field_names + .get(&field_id) + .and_then(|field_name| get_field(cursor_type, field_name)) + } + + pub fn insert_var(&mut self, source_var_id: SourceVarId, var_type: Type) -> DebugVarId { + if !self.source_variables.contains_key(&source_var_id) { + unreachable!("cannot find source debug variable {source_var_id:?}"); + } + + let ptype: PrintableType = var_type.follow_bindings().into(); + let type_id = self.types_reverse.get(&ptype).copied().unwrap_or_else(|| { + let type_id = DebugTypeId(self.next_type_id); + self.next_type_id += 1; + self.types_reverse.insert(ptype.clone(), type_id); + self.types.insert(type_id, ptype); + type_id + }); + // check if we need to instantiate the var with a new type + let existing_var_id = self.source_to_debug_vars.get(&source_var_id).and_then(|var_id| { + let (_, existing_type_id) = self.variables.get(var_id).unwrap(); + (*existing_type_id == type_id).then_some(var_id) + }); + if let Some(var_id) = existing_var_id { + *var_id + } else { + let var_id = DebugVarId(self.next_var_id); + self.next_var_id += 1; + self.variables.insert(var_id, (source_var_id, type_id)); + self.source_to_debug_vars.insert(source_var_id, var_id); + var_id + } + } + + pub fn get_var_id(&self, source_var_id: SourceVarId) -> Option { + self.source_to_debug_vars.get(&source_var_id).copied() + } + + pub fn get_type(&self, source_var_id: SourceVarId) -> Option<&PrintableType> { + self.source_to_debug_vars + .get(&source_var_id) + .and_then(|var_id| self.variables.get(var_id)) + .and_then(|(_, type_id)| self.types.get(type_id)) + } +} + +fn get_field(ptype: &PrintableType, field_name: &str) -> Option { + match ptype { + PrintableType::Struct { fields, .. } => { + fields.iter().position(|(name, _)| name == field_name) + } + PrintableType::Tuple { .. } | PrintableType::Array { .. } => { + field_name.parse::().ok() + } + _ => None, + } +} diff --git a/compiler/noirc_frontend/src/monomorphization/mod.rs b/compiler/noirc_frontend/src/monomorphization/mod.rs index 67b246a02ce..ce880401d77 100644 --- a/compiler/noirc_frontend/src/monomorphization/mod.rs +++ b/compiler/noirc_frontend/src/monomorphization/mod.rs @@ -9,30 +9,35 @@ //! The entry point to this pass is the `monomorphize` function which, starting from a given //! function, will monomorphize the entire reachable program. 
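Before moving into the monomorphizer itself, the `DebugTypeTracker` introduced above is worth restating in miniature: one source-level variable id can map to several runtime variable ids, one per concrete type it takes when a generic function is instantiated, while identical types are interned once. The following is a self-contained toy model, not the real tracker (ids and "printable types" are simplified to integers and strings):

```rust
use std::collections::HashMap;

#[derive(Default)]
struct ToyTracker {
    types: HashMap<String, u32>,    // printable type -> type id
    vars: HashMap<(u32, u32), u32>, // (source var id, type id) -> runtime var id
    next_type_id: u32,
    next_var_id: u32,
}

impl ToyTracker {
    fn insert_var(&mut self, source_var_id: u32, printable_type: &str) -> u32 {
        let next_type_id = &mut self.next_type_id;
        let type_id = *self.types.entry(printable_type.to_string()).or_insert_with(|| {
            let id = *next_type_id;
            *next_type_id += 1;
            id
        });
        let next_var_id = &mut self.next_var_id;
        *self.vars.entry((source_var_id, type_id)).or_insert_with(|| {
            let id = *next_var_id;
            *next_var_id += 1;
            id
        })
    }
}

fn main() {
    let mut tracker = ToyTracker::default();
    // The same source variable, instantiated for two types, gets two runtime ids.
    let a = tracker.insert_var(0, "u32");
    let b = tracker.insert_var(0, "Field");
    assert_ne!(a, b);
    // Re-instantiating with an already-seen type reuses the existing id.
    assert_eq!(tracker.insert_var(0, "u32"), a);
}
```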
use acvm::FieldElement; -use iter_extended::{btree_map, vecmap}; -use noirc_errors::Location; +use iter_extended::{btree_map, try_vecmap, vecmap}; +use noirc_errors::{CustomDiagnostic, FileDiagnostic, Location}; use noirc_printable_type::PrintableType; use std::{ collections::{BTreeMap, HashMap, VecDeque}, unreachable, }; +use thiserror::Error; use crate::{ + debug::DebugInstrumenter, hir_def::{ expr::*, - function::{FunctionSignature, Parameters}, + function::{FuncMeta, FunctionSignature, Parameters}, stmt::{HirAssignStatement, HirLValue, HirLetStatement, HirPattern, HirStatement}, types, }, node_interner::{self, DefinitionKind, NodeInterner, StmtId, TraitImplKind, TraitMethodId}, token::FunctionAttribute, - ContractFunctionType, FunctionKind, Type, TypeBinding, TypeBindings, TypeVariable, - TypeVariableKind, UnaryOp, Visibility, + ContractFunctionType, FunctionKind, IntegerBitSize, Signedness, Type, TypeBinding, + TypeBindings, TypeVariable, TypeVariableKind, UnaryOp, Visibility, }; use self::ast::{Definition, FuncId, Function, LocalId, Program}; +use self::debug_types::DebugTypeTracker; pub mod ast; +mod debug; +pub mod debug_types; pub mod printer; struct LambdaContext { @@ -67,7 +72,7 @@ struct Monomorphizer<'interner> { finished_functions: BTreeMap, /// Used to reference existing definitions in the HIR - interner: &'interner NodeInterner, + interner: &'interner mut NodeInterner, lambda_envs_stack: Vec, @@ -77,10 +82,46 @@ struct Monomorphizer<'interner> { is_range_loop: bool, return_location: Option, + + debug_type_tracker: DebugTypeTracker, } type HirType = crate::Type; +#[derive(Debug, Error)] +pub enum MonomorphizationError { + #[error("Length of generic array could not be determined.")] + UnknownArrayLength { location: Location }, +} + +impl MonomorphizationError { + fn call_stack(&self) -> Vec { + match self { + MonomorphizationError::UnknownArrayLength { location } => vec![*location], + } + } +} + +impl From for FileDiagnostic { + fn from(error: MonomorphizationError) -> FileDiagnostic { + let call_stack = error.call_stack(); + let file_id = call_stack.last().map(|location| location.file).unwrap_or_default(); + let diagnostic = error.into_diagnostic(); + diagnostic.in_file(file_id).with_call_stack(call_stack) + } +} + +impl MonomorphizationError { + fn into_diagnostic(self) -> CustomDiagnostic { + CustomDiagnostic::simple_error( + "Internal Consistency Evaluators Errors: \n + This is likely a bug. Consider opening an issue at https://github.com/noir-lang/noir/issues".to_owned(), + self.to_string(), + noirc_errors::Span::inclusive(0, 0) + ) + } +} + /// Starting from the given `main` function, monomorphize the entire program, /// replacing all references to type variables and NamedGenerics with concrete /// types, duplicating definitions as necessary to do so. @@ -93,9 +134,21 @@ type HirType = crate::Type; /// this function. Typically, this is the function named "main" in the source project, /// but it can also be, for example, an arbitrary test function for running `nargo test`. 
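The traversal this doc comment describes, and which the function body below implements with a work queue of pending instantiations, can be sketched in standalone form: starting from `main`, each reachable function is queued once per distinct instantiation and compiled until the queue drains (bindings and real ids are elided; names here are illustrative):

```rust
use std::collections::{HashSet, VecDeque};

fn monomorphize_reachable(main: &str, calls: &[(&str, &str)]) -> Vec<String> {
    let mut queue: VecDeque<String> = VecDeque::from([main.to_string()]);
    let mut seen: HashSet<String> = HashSet::from([main.to_string()]);
    let mut finished: Vec<String> = Vec::new();

    while let Some(next) = queue.pop_front() {
        // "Compiling" a function here just records it and queues its callees.
        for (caller, callee) in calls {
            if *caller == next && seen.insert((*callee).to_string()) {
                queue.push_back((*callee).to_string());
            }
        }
        finished.push(next);
    }
    finished
}

fn main() {
    let order = monomorphize_reachable("main", &[("main", "helper"), ("helper", "hash")]);
    assert_eq!(order, ["main", "helper", "hash"]);
}
```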
#[tracing::instrument(level = "trace", skip(main, interner))] -pub fn monomorphize(main: node_interner::FuncId, interner: &NodeInterner) -> Program { - let mut monomorphizer = Monomorphizer::new(interner); - let function_sig = monomorphizer.compile_main(main); +pub fn monomorphize( + main: node_interner::FuncId, + interner: &mut NodeInterner, +) -> Result { + monomorphize_debug(main, interner, &DebugInstrumenter::default()) +} + +pub fn monomorphize_debug( + main: node_interner::FuncId, + interner: &mut NodeInterner, + debug_instrumenter: &DebugInstrumenter, +) -> Result { + let debug_type_tracker = DebugTypeTracker::build_from_debug_instrumenter(debug_instrumenter); + let mut monomorphizer = Monomorphizer::new(interner, debug_type_tracker); + let function_sig = monomorphizer.compile_main(main)?; while !monomorphizer.queue.is_empty() { let (next_fn_id, new_id, bindings, trait_method) = monomorphizer.queue.pop_front().unwrap(); @@ -103,25 +156,31 @@ pub fn monomorphize(main: node_interner::FuncId, interner: &NodeInterner) -> Pro perform_instantiation_bindings(&bindings); let impl_bindings = monomorphizer.perform_impl_bindings(trait_method, next_fn_id); - monomorphizer.function(next_fn_id, new_id); + monomorphizer.function(next_fn_id, new_id)?; undo_instantiation_bindings(impl_bindings); undo_instantiation_bindings(bindings); } let functions = vecmap(monomorphizer.finished_functions, |(_, f)| f); - let meta = interner.function_meta(&main); + let FuncMeta { return_distinctness, return_visibility, kind, .. } = + monomorphizer.interner.function_meta(&main); - Program::new( + let (debug_variables, debug_types) = monomorphizer.debug_type_tracker.extract_vars_and_types(); + let program = Program::new( functions, function_sig, - meta.return_distinctness, + *return_distinctness, monomorphizer.return_location, - meta.return_visibility, - ) + *return_visibility, + *kind == FunctionKind::Recursive, + debug_variables, + debug_types, + ); + Ok(program) } impl<'interner> Monomorphizer<'interner> { - fn new(interner: &'interner NodeInterner) -> Self { + fn new(interner: &'interner mut NodeInterner, debug_type_tracker: DebugTypeTracker) -> Self { Monomorphizer { globals: HashMap::new(), locals: HashMap::new(), @@ -133,6 +192,7 @@ impl<'interner> Monomorphizer<'interner> { lambda_envs_stack: Vec::new(), is_range_loop: false, return_location: None, + debug_type_tracker, } } @@ -195,6 +255,9 @@ impl<'interner> Monomorphizer<'interner> { _ => unreachable!("Oracle function must have an oracle attribute"), } } + FunctionKind::Recursive => { + unreachable!("Only main can be specified as recursive, which should already be checked"); + } } } } @@ -209,10 +272,13 @@ impl<'interner> Monomorphizer<'interner> { self.globals.entry(id).or_default().insert(typ, new_id); } - fn compile_main(&mut self, main_id: node_interner::FuncId) -> FunctionSignature { + fn compile_main( + &mut self, + main_id: node_interner::FuncId, + ) -> Result { let new_main_id = self.next_function_id(); assert_eq!(new_main_id, Program::main_id()); - self.function(main_id, new_main_id); + self.function(main_id, new_main_id)?; self.return_location = self.interner.function(&main_id).block(self.interner).statements().last().and_then( |x| match self.interner.statement(x) { @@ -221,16 +287,20 @@ impl<'interner> Monomorphizer<'interner> { }, ); let main_meta = self.interner.function_meta(&main_id); - main_meta.function_signature() + Ok(main_meta.function_signature()) } - fn function(&mut self, f: node_interner::FuncId, id: FuncId) { + fn function( + &mut 
self, + f: node_interner::FuncId, + id: FuncId, + ) -> Result<(), MonomorphizationError> { if let Some((self_type, trait_id)) = self.interner.get_function_trait(&f) { let the_trait = self.interner.get_trait(trait_id); the_trait.self_type_typevar.force_bind(self_type); } - let meta = self.interner.function_meta(&f); + let meta = self.interner.function_meta(&f).clone(); let modifiers = self.interner.function_modifiers(&f); let name = self.interner.function_name(&f).to_owned(); @@ -240,15 +310,15 @@ impl<'interner> Monomorphizer<'interner> { Type::TraitAsType(..) => &body_return_type, _ => meta.return_type(), }); - - let parameters = self.parameters(&meta.parameters); - - let body = self.expr(body_expr_id); let unconstrained = modifiers.is_unconstrained || matches!(modifiers.contract_function_type, Some(ContractFunctionType::Open)); + let parameters = self.parameters(&meta.parameters); + let body = self.expr(body_expr_id)?; let function = ast::Function { id, name, parameters, body, return_type, unconstrained }; + self.push_function(id, function); + Ok(()) } fn push_function(&mut self, id: FuncId, function: ast::Function) { @@ -308,15 +378,18 @@ impl<'interner> Monomorphizer<'interner> { } } - fn expr(&mut self, expr: node_interner::ExprId) -> ast::Expression { + fn expr( + &mut self, + expr: node_interner::ExprId, + ) -> Result { use ast::Expression::Literal; use ast::Literal::*; - match self.interner.expression(&expr) { - HirExpression::Ident(ident) => self.ident(ident, expr), + let expr = match self.interner.expression(&expr) { + HirExpression::Ident(ident) => self.ident(ident, expr)?, HirExpression::Literal(HirLiteral::Str(contents)) => Literal(Str(contents)), HirExpression::Literal(HirLiteral::FmtStr(contents, idents)) => { - let fields = vecmap(idents, |ident| self.expr(ident)); + let fields = try_vecmap(idents, |ident| self.expr(ident))?; Literal(FmtStr( contents, fields.len() as u64, @@ -331,6 +404,7 @@ impl<'interner> Monomorphizer<'interner> { match typ { ast::Type::Field => Literal(Integer(-value, typ, location)), ast::Type::Integer(_, bit_size) => { + let bit_size: u32 = bit_size.into(); let base = 1_u128 << bit_size; Literal(Integer(FieldElement::from(base) - value, typ, location)) } @@ -343,30 +417,29 @@ impl<'interner> Monomorphizer<'interner> { } } HirExpression::Literal(HirLiteral::Array(array)) => match array { - HirArrayLiteral::Standard(array) => self.standard_array(expr, array), + HirArrayLiteral::Standard(array) => self.standard_array(expr, array)?, HirArrayLiteral::Repeated { repeated_element, length } => { - self.repeated_array(expr, repeated_element, length) + self.repeated_array(expr, repeated_element, length)? 
} }, HirExpression::Literal(HirLiteral::Unit) => ast::Expression::Block(vec![]), - HirExpression::Block(block) => self.block(block.0), + HirExpression::Block(block) => self.block(block.0)?, HirExpression::Prefix(prefix) => { let location = self.interner.expr_location(&expr); ast::Expression::Unary(ast::Unary { operator: prefix.operator, - rhs: Box::new(self.expr(prefix.rhs)), + rhs: Box::new(self.expr(prefix.rhs)?), result_type: self.convert_type(&self.interner.id_type(expr)), location, }) } HirExpression::Infix(infix) => { - let lhs = self.expr(infix.lhs); - let rhs = self.expr(infix.rhs); + let lhs = self.expr(infix.lhs)?; + let rhs = self.expr(infix.rhs)?; let operator = infix.operator.kind; let location = self.interner.expr_location(&expr); - if self.interner.get_selected_impl_for_expression(expr).is_some() { // If an impl was selected for this infix operator, replace it // with a method call to the appropriate trait impl method. @@ -395,26 +468,27 @@ impl<'interner> Monomorphizer<'interner> { } } - HirExpression::Index(index) => self.index(expr, index), + HirExpression::Index(index) => self.index(expr, index)?, HirExpression::MemberAccess(access) => { let field_index = self.interner.get_field_index(expr); - let expr = Box::new(self.expr(access.lhs)); + let expr = Box::new(self.expr(access.lhs)?); ast::Expression::ExtractTupleField(expr, field_index) } - HirExpression::Call(call) => self.function_call(call, expr), + HirExpression::Call(call) => self.function_call(call, expr)?, HirExpression::Cast(cast) => ast::Expression::Cast(ast::Cast { - lhs: Box::new(self.expr(cast.lhs)), + lhs: Box::new(self.expr(cast.lhs)?), r#type: self.convert_type(&cast.r#type), location: self.interner.expr_location(&expr), }), HirExpression::If(if_expr) => { - let cond = self.expr(if_expr.condition); - let then = self.expr(if_expr.consequence); - let else_ = if_expr.alternative.map(|alt| Box::new(self.expr(alt))); + let cond = self.expr(if_expr.condition)?; + let then = self.expr(if_expr.consequence)?; + let else_ = + if_expr.alternative.map(|alt| self.expr(alt)).transpose()?.map(Box::new); ast::Expression::If(ast::If { condition: Box::new(cond), consequence: Box::new(then), @@ -424,28 +498,30 @@ impl<'interner> Monomorphizer<'interner> { } HirExpression::Tuple(fields) => { - let fields = vecmap(fields, |id| self.expr(id)); + let fields = try_vecmap(fields, |id| self.expr(id))?; ast::Expression::Tuple(fields) } - HirExpression::Constructor(constructor) => self.constructor(constructor, expr), + HirExpression::Constructor(constructor) => self.constructor(constructor, expr)?, - HirExpression::Lambda(lambda) => self.lambda(lambda, expr), + HirExpression::Lambda(lambda) => self.lambda(lambda, expr)?, HirExpression::MethodCall(hir_method_call) => { unreachable!("Encountered HirExpression::MethodCall during monomorphization {hir_method_call:?}") } HirExpression::Error => unreachable!("Encountered Error node during monomorphization"), - } + }; + + Ok(expr) } fn standard_array( &mut self, array: node_interner::ExprId, array_elements: Vec, - ) -> ast::Expression { + ) -> Result { let typ = self.convert_type(&self.interner.id_type(array)); - let contents = vecmap(array_elements, |id| self.expr(id)); - ast::Expression::Literal(ast::Literal::Array(ast::ArrayLiteral { contents, typ })) + let contents = try_vecmap(array_elements, |id| self.expr(id))?; + Ok(ast::Expression::Literal(ast::Literal::Array(ast::ArrayLiteral { contents, typ }))) } fn repeated_array( @@ -453,46 +529,56 @@ impl<'interner> Monomorphizer<'interner> { 
array: node_interner::ExprId, repeated_element: node_interner::ExprId, length: HirType, - ) -> ast::Expression { + ) -> Result { let typ = self.convert_type(&self.interner.id_type(array)); - let length = length - .evaluate_to_u64() - .expect("Length of array is unknown when evaluating numeric generic"); + let length = length.evaluate_to_u64().ok_or_else(|| { + let location = self.interner.expr_location(&array); + MonomorphizationError::UnknownArrayLength { location } + })?; - let contents = vecmap(0..length, |_| self.expr(repeated_element)); - ast::Expression::Literal(ast::Literal::Array(ast::ArrayLiteral { contents, typ })) + let contents = try_vecmap(0..length, |_| self.expr(repeated_element))?; + Ok(ast::Expression::Literal(ast::Literal::Array(ast::ArrayLiteral { contents, typ }))) } - fn index(&mut self, id: node_interner::ExprId, index: HirIndexExpression) -> ast::Expression { + fn index( + &mut self, + id: node_interner::ExprId, + index: HirIndexExpression, + ) -> Result { let element_type = self.convert_type(&self.interner.id_type(id)); - let collection = Box::new(self.expr(index.collection)); - let index = Box::new(self.expr(index.index)); + let collection = Box::new(self.expr(index.collection)?); + let index = Box::new(self.expr(index.index)?); let location = self.interner.expr_location(&id); - ast::Expression::Index(ast::Index { collection, index, element_type, location }) + Ok(ast::Expression::Index(ast::Index { collection, index, element_type, location })) } - fn statement(&mut self, id: StmtId) -> ast::Expression { + fn statement(&mut self, id: StmtId) -> Result { match self.interner.statement(&id) { HirStatement::Let(let_statement) => self.let_statement(let_statement), HirStatement::Constrain(constrain) => { - let expr = self.expr(constrain.0); + let expr = self.expr(constrain.0)?; let location = self.interner.expr_location(&constrain.0); - ast::Expression::Constrain(Box::new(expr), location, constrain.2) + let assert_message = constrain + .2 + .map(|assert_msg_expr| self.expr(assert_msg_expr)) + .transpose()? 
+ .map(Box::new); + Ok(ast::Expression::Constrain(Box::new(expr), location, assert_message)) } HirStatement::Assign(assign) => self.assign(assign), HirStatement::For(for_loop) => { self.is_range_loop = true; - let start = self.expr(for_loop.start_range); - let end = self.expr(for_loop.end_range); + let start = self.expr(for_loop.start_range)?; + let end = self.expr(for_loop.end_range)?; self.is_range_loop = false; let index_variable = self.next_local_id(); self.define_local(for_loop.identifier.id, index_variable); - let block = Box::new(self.expr(for_loop.block)); + let block = Box::new(self.expr(for_loop.block)?); - ast::Expression::For(ast::For { + Ok(ast::Expression::For(ast::For { index_variable, index_name: self.interner.definition_name(for_loop.identifier.id).to_owned(), index_type: self.convert_type(&self.interner.id_type(for_loop.start_range)), @@ -501,25 +587,30 @@ impl<'interner> Monomorphizer<'interner> { start_range_location: self.interner.expr_location(&for_loop.start_range), end_range_location: self.interner.expr_location(&for_loop.end_range), block, - }) + })) } HirStatement::Expression(expr) => self.expr(expr), - HirStatement::Semi(expr) => ast::Expression::Semi(Box::new(self.expr(expr))), + HirStatement::Semi(expr) => { + self.expr(expr).map(|expr| ast::Expression::Semi(Box::new(expr))) + } HirStatement::Error => unreachable!(), } } - fn let_statement(&mut self, let_statement: HirLetStatement) -> ast::Expression { - let expr = self.expr(let_statement.expression); + fn let_statement( + &mut self, + let_statement: HirLetStatement, + ) -> Result { + let expr = self.expr(let_statement.expression)?; let expected_type = self.interner.id_type(let_statement.expression); - self.unpack_pattern(let_statement.pattern, expr, &expected_type) + Ok(self.unpack_pattern(let_statement.pattern, expr, &expected_type)) } fn constructor( &mut self, constructor: HirConstructorExpression, id: node_interner::ExprId, - ) -> ast::Expression { + ) -> Result { let typ = self.interner.id_type(id); let field_types = unwrap_struct_type(&typ); @@ -536,7 +627,7 @@ impl<'interner> Monomorphizer<'interner> { let typ = self.convert_type(field_type); field_vars.insert(field_name.0.contents.clone(), (new_id, typ)); - let expression = Box::new(self.expr(expr_id)); + let expression = Box::new(self.expr(expr_id)?); new_exprs.push(ast::Expression::Let(ast::Let { id: new_id, @@ -561,11 +652,15 @@ impl<'interner> Monomorphizer<'interner> { // Finally we can return the created Tuple from the new block new_exprs.push(ast::Expression::Tuple(field_idents)); - ast::Expression::Block(new_exprs) + Ok(ast::Expression::Block(new_exprs)) } - fn block(&mut self, statement_ids: Vec) -> ast::Expression { - ast::Expression::Block(vecmap(statement_ids, |id| self.statement(id))) + fn block( + &mut self, + statement_ids: Vec, + ) -> Result { + let stmts = try_vecmap(statement_ids, |id| self.statement(id)); + stmts.map(ast::Expression::Block) } fn unpack_pattern( @@ -671,25 +766,28 @@ impl<'interner> Monomorphizer<'interner> { let mutable = definition.mutable; let definition = self.lookup_local(ident.id)?; - let typ = self.convert_type(&self.interner.id_type(ident.id)); + let typ = self.convert_type(&self.interner.definition_type(ident.id)); Some(ast::Ident { location: Some(ident.location), mutable, definition, name, typ }) } - fn ident(&mut self, ident: HirIdent, expr_id: node_interner::ExprId) -> ast::Expression { + fn ident( + &mut self, + ident: HirIdent, + expr_id: node_interner::ExprId, + ) -> Result { let typ = 
self.interner.id_type(expr_id); if let ImplKind::TraitMethod(method, _, _) = ident.impl_kind { - return self.resolve_trait_method_reference(expr_id, typ, method); + return Ok(self.resolve_trait_method_reference(expr_id, typ, method)); } let definition = self.interner.definition(ident.id); - match &definition.kind { + let ident = match &definition.kind { DefinitionKind::Function(func_id) => { let mutable = definition.mutable; let location = Some(ident.location); let name = definition.name.clone(); - let typ = self.interner.id_type(expr_id); let definition = self.lookup_function(*func_id, expr_id, &typ, None); let typ = self.convert_type(&typ); let ident = ast::Ident { location, mutable, definition, name, typ: typ.clone() }; @@ -706,7 +804,14 @@ impl<'interner> Monomorphizer<'interner> { ident_expression } } - DefinitionKind::Global(expr_id) => self.expr(*expr_id), + DefinitionKind::Global(global_id) => { + let Some(let_) = self.interner.get_global_let_statement(*global_id) else { + unreachable!( + "Globals should have a corresponding let statement by monomorphization" + ) + }; + self.expr(let_.expression)? + } DefinitionKind::Local(_) => self.lookup_captured_expr(ident.id).unwrap_or_else(|| { let ident = self.local_ident(&ident).unwrap(); ast::Expression::Ident(ident) @@ -723,9 +828,12 @@ impl<'interner> Monomorphizer<'interner> { let value = FieldElement::from(value as u128); let location = self.interner.id_location(expr_id); - ast::Expression::Literal(ast::Literal::Integer(value, ast::Type::Field, location)) + let typ = self.convert_type(&typ); + ast::Expression::Literal(ast::Literal::Integer(value, typ, location)) } - } + }; + + Ok(ident) } /// Convert a non-tuple/struct type to a monomorphized type @@ -773,12 +881,14 @@ impl<'interner> Monomorphizer<'interner> { // Default any remaining unbound type variables. // This should only happen if the variable in question is unused // and within a larger generic type. 
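The hunk that follows extends the range-loop special case to the new `Integer` kind: inside a `for` range, an unbound integer-kinded variable falls back to the range-loop type, while elsewhere each kind's own default applies. A compact sketch of the resulting rule, with type names written as plain strings for illustration:

```rust
#[derive(Clone, Copy, Debug)]
enum Kind {
    Normal,
    IntegerOrField,
    Integer,
}

fn default_for(kind: Kind, is_range_loop: bool) -> &'static str {
    if is_range_loop && matches!(kind, Kind::IntegerOrField | Kind::Integer) {
        "u64" // Type::default_range_loop_type()
    } else {
        match kind {
            // Unbound Normal / IntegerOrField variables default to Field.
            Kind::Normal | Kind::IntegerOrField => "Field",
            Kind::Integer => "u64",
        }
    }
}

fn main() {
    assert_eq!(default_for(Kind::IntegerOrField, true), "u64");
    assert_eq!(default_for(Kind::IntegerOrField, false), "Field");
}
```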
- let default = - if self.is_range_loop && matches!(kind, TypeVariableKind::IntegerOrField) { - Type::default_range_loop_type() - } else { - kind.default_type() - }; + let default = if self.is_range_loop + && (matches!(kind, TypeVariableKind::IntegerOrField) + || matches!(kind, TypeVariableKind::Integer)) + { + Type::default_range_loop_type() + } else { + kind.default_type() + }; let monomorphized_default = self.convert_type(&default); binding.bind(default); @@ -791,6 +901,8 @@ impl<'interner> Monomorphizer<'interner> { ast::Type::Tuple(fields) } + HirType::Alias(def, args) => self.convert_type(&def.borrow().get_type(args)), + HirType::Tuple(fields) => { let fields = vecmap(fields, |x| self.convert_type(x)); ast::Type::Tuple(fields) @@ -913,10 +1025,13 @@ impl<'interner> Monomorphizer<'interner> { &mut self, call: HirCallExpression, id: node_interner::ExprId, - ) -> ast::Expression { - let original_func = Box::new(self.expr(call.func)); - let mut arguments = vecmap(&call.arguments, |id| self.expr(*id)); + ) -> Result { + let original_func = Box::new(self.expr(call.func)?); + let mut arguments = try_vecmap(&call.arguments, |id| self.expr(*id))?; let hir_arguments = vecmap(&call.arguments, |id| self.interner.expression(id)); + + self.patch_debug_instrumentation_call(&call, &mut arguments)?; + let return_type = self.interner.id_type(id); let return_type = self.convert_type(&return_type); @@ -929,6 +1044,9 @@ impl<'interner> Monomorphizer<'interner> { // The first argument to the `print` oracle is a bool, indicating a newline to be inserted at the end of the input // The second argument is expected to always be an ident self.append_printable_type_info(&hir_arguments[1], &mut arguments); + } else if name.as_str() == "assert_message" { + // The first argument to the `assert_message` oracle is the expression passed as a message to an `assert` or `assert_eq` statement + self.append_printable_type_info(&hir_arguments[0], &mut arguments); } } } @@ -975,9 +1093,9 @@ impl<'interner> Monomorphizer<'interner> { if !block_expressions.is_empty() { block_expressions.push(call); - ast::Expression::Block(block_expressions) + Ok(ast::Expression::Block(block_expressions)) } else { - call + Ok(call) } } @@ -998,7 +1116,7 @@ impl<'interner> Monomorphizer<'interner> { ) { match hir_argument { HirExpression::Ident(ident) => { - let typ = self.interner.id_type(ident.id); + let typ = self.interner.definition_type(ident.id); let typ: Type = typ.follow_bindings(); let is_fmt_str = match typ { // A format string has many different possible types that need to be handled. @@ -1024,7 +1142,7 @@ impl<'interner> Monomorphizer<'interner> { // The caller needs information as to whether it is handling a format string or a single type arguments.push(ast::Expression::Literal(ast::Literal::Bool(is_fmt_str))); } - _ => unreachable!("logging expr {:?} is not supported", arguments[0]), + _ => unreachable!("logging expr {:?} is not supported", hir_argument), } } @@ -1033,10 +1151,10 @@ impl<'interner> Monomorphizer<'interner> { // since they cannot be passed from ACIR into Brillig if let HirType::Array(size, _) = typ { if let HirType::NotConstant = **size { - unreachable!("println does not support slices. Convert the slice to an array before passing it to println"); + unreachable!("println and format strings do not support slices. 
Convert the slice to an array before passing it to println"); } } else if matches!(typ, HirType::MutableReference(_)) { - unreachable!("println does not support mutable references."); + unreachable!("println and format strings do not support mutable references."); } let printable_type: PrintableType = typ.into(); @@ -1065,7 +1183,8 @@ impl<'interner> Monomorphizer<'interner> { return match opcode.as_str() { "modulus_num_bits" => { let bits = (FieldElement::max_num_bits() as u128).into(); - let typ = ast::Type::Field; + let typ = + ast::Type::Integer(Signedness::Unsigned, IntegerBitSize::SixtyFour); Some(ast::Expression::Literal(ast::Literal::Integer(bits, typ, location))) } "zeroed" => { @@ -1074,19 +1193,19 @@ impl<'interner> Monomorphizer<'interner> { } "modulus_le_bits" => { let bits = FieldElement::modulus().to_radix_le(2); - Some(self.modulus_array_literal(bits, 1, location)) + Some(self.modulus_array_literal(bits, IntegerBitSize::One, location)) } "modulus_be_bits" => { let bits = FieldElement::modulus().to_radix_be(2); - Some(self.modulus_array_literal(bits, 1, location)) + Some(self.modulus_array_literal(bits, IntegerBitSize::One, location)) } "modulus_be_bytes" => { let bytes = FieldElement::modulus().to_bytes_be(); - Some(self.modulus_array_literal(bytes, 8, location)) + Some(self.modulus_array_literal(bytes, IntegerBitSize::Eight, location)) } "modulus_le_bytes" => { let bytes = FieldElement::modulus().to_bytes_le(); - Some(self.modulus_array_literal(bytes, 8, location)) + Some(self.modulus_array_literal(bytes, IntegerBitSize::Eight, location)) } _ => None, }; @@ -1098,7 +1217,7 @@ impl<'interner> Monomorphizer<'interner> { fn modulus_array_literal( &self, bytes: Vec, - arr_elem_bits: u32, + arr_elem_bits: IntegerBitSize, location: Location, ) -> ast::Expression { use ast::*; @@ -1147,47 +1266,59 @@ impl<'interner> Monomorphizer<'interner> { .collect() } - fn assign(&mut self, assign: HirAssignStatement) -> ast::Expression { - let expression = Box::new(self.expr(assign.expression)); - let lvalue = self.lvalue(assign.lvalue); - ast::Expression::Assign(ast::Assign { expression, lvalue }) + fn assign( + &mut self, + assign: HirAssignStatement, + ) -> Result { + let expression = Box::new(self.expr(assign.expression)?); + let lvalue = self.lvalue(assign.lvalue)?; + Ok(ast::Expression::Assign(ast::Assign { expression, lvalue })) } - fn lvalue(&mut self, lvalue: HirLValue) -> ast::LValue { - match lvalue { + fn lvalue(&mut self, lvalue: HirLValue) -> Result { + let value = match lvalue { HirLValue::Ident(ident, _) => self .lookup_captured_lvalue(ident.id) .unwrap_or_else(|| ast::LValue::Ident(self.local_ident(&ident).unwrap())), HirLValue::MemberAccess { object, field_index, .. 
} => { let field_index = field_index.unwrap(); - let object = Box::new(self.lvalue(*object)); + let object = Box::new(self.lvalue(*object)?); ast::LValue::MemberAccess { object, field_index } } HirLValue::Index { array, index, typ } => { let location = self.interner.expr_location(&index); - let array = Box::new(self.lvalue(*array)); - let index = Box::new(self.expr(index)); + let array = Box::new(self.lvalue(*array)?); + let index = Box::new(self.expr(index)?); let element_type = self.convert_type(&typ); ast::LValue::Index { array, index, element_type, location } } HirLValue::Dereference { lvalue, element_type } => { - let reference = Box::new(self.lvalue(*lvalue)); + let reference = Box::new(self.lvalue(*lvalue)?); let element_type = self.convert_type(&element_type); ast::LValue::Dereference { reference, element_type } } - } + }; + + Ok(value) } - fn lambda(&mut self, lambda: HirLambda, expr: node_interner::ExprId) -> ast::Expression { + fn lambda( + &mut self, + lambda: HirLambda, + expr: node_interner::ExprId, + ) -> Result { if lambda.captures.is_empty() { self.lambda_no_capture(lambda) } else { - let (setup, closure_variable) = self.lambda_with_setup(lambda, expr); - ast::Expression::Block(vec![setup, closure_variable]) + let (setup, closure_variable) = self.lambda_with_setup(lambda, expr)?; + Ok(ast::Expression::Block(vec![setup, closure_variable])) } } - fn lambda_no_capture(&mut self, lambda: HirLambda) -> ast::Expression { + fn lambda_no_capture( + &mut self, + lambda: HirLambda, + ) -> Result { let ret_type = self.convert_type(&lambda.return_type); let lambda_name = "lambda"; let parameter_types = vecmap(&lambda.parameters, |(_, typ)| self.convert_type(typ)); @@ -1197,7 +1328,7 @@ impl<'interner> Monomorphizer<'interner> { vecmap(lambda.parameters, |(pattern, typ)| (pattern, typ, Visibility::Private)).into(); let parameters = self.parameters(¶meters); - let body = self.expr(lambda.body); + let body = self.expr(lambda.body)?; let id = self.next_function_id(); let return_type = ret_type.clone(); @@ -1211,20 +1342,20 @@ impl<'interner> Monomorphizer<'interner> { ast::Type::Function(parameter_types, Box::new(ret_type), Box::new(ast::Type::Unit)); let name = lambda_name.to_owned(); - ast::Expression::Ident(ast::Ident { + Ok(ast::Expression::Ident(ast::Ident { definition: Definition::Function(id), mutable: false, location: None, name, typ, - }) + })) } fn lambda_with_setup( &mut self, lambda: HirLambda, expr: node_interner::ExprId, - ) -> (ast::Expression, ast::Expression) { + ) -> Result<(ast::Expression, ast::Expression), MonomorphizationError> { // returns (, ) // which can be used directly in callsites or transformed // directly to a single `Expression` @@ -1300,7 +1431,7 @@ impl<'interner> Monomorphizer<'interner> { self.lambda_envs_stack .push(LambdaContext { env_ident: env_ident.clone(), captures: lambda.captures }); - let body = self.expr(lambda.body); + let body = self.expr(lambda.body)?; self.lambda_envs_stack.pop(); let lambda_fn_typ: ast::Type = @@ -1342,7 +1473,7 @@ impl<'interner> Monomorphizer<'interner> { typ: ast::Type::Tuple(vec![env_typ, lambda_fn_typ]), }); - (block_let_stmt, closure_ident) + Ok((block_let_stmt, closure_ident)) } /// Implements std::unsafe::zeroed by returning an appropriate zeroed diff --git a/compiler/noirc_frontend/src/node_interner.rs b/compiler/noirc_frontend/src/node_interner.rs index b856b54f6ca..5de43e59254 100644 --- a/compiler/noirc_frontend/src/node_interner.rs +++ b/compiler/noirc_frontend/src/node_interner.rs @@ -1,15 +1,22 @@ +use 
std::borrow::Cow; use std::collections::HashMap; +use std::ops::Deref; use arena::{Arena, Index}; use fm::FileId; use iter_extended::vecmap; use noirc_errors::{Location, Span, Spanned}; +use petgraph::algo::tarjan_scc; +use petgraph::prelude::DiGraph; +use petgraph::prelude::NodeIndex as PetGraphIndex; use crate::ast::Ident; use crate::graph::CrateId; +use crate::hir::def_collector::dc_crate::CompilationError; use crate::hir::def_collector::dc_crate::{UnresolvedStruct, UnresolvedTrait, UnresolvedTypeAlias}; use crate::hir::def_map::{LocalModuleId, ModuleId}; +use crate::hir::resolution::errors::ResolverError; use crate::hir_def::stmt::HirLetStatement; use crate::hir_def::traits::TraitImpl; use crate::hir_def::traits::{Trait, TraitConstraint}; @@ -22,7 +29,7 @@ use crate::hir_def::{ use crate::token::{Attributes, SecondaryAttribute}; use crate::{ BinaryOpKind, ContractFunctionType, FunctionDefinition, FunctionVisibility, Generics, Shared, - TypeAliasType, TypeBindings, TypeVariable, TypeVariableId, TypeVariableKind, + TypeAlias, TypeBindings, TypeVariable, TypeVariableId, TypeVariableKind, }; /// An arbitrary number to limit the recursion depth when searching for trait impls. @@ -41,6 +48,7 @@ type StructAttributes = Vec; pub struct NodeInterner { pub(crate) nodes: Arena, pub(crate) func_meta: HashMap, + function_definition_ids: HashMap, // For a given function ID, this gives the function's modifiers which includes @@ -51,6 +59,14 @@ pub struct NodeInterner { // Contains the source module each function was defined in function_modules: HashMap, + /// This graph tracks dependencies between different global definitions. + /// This is used to ensure the absense of dependency cycles for globals and types. + dependency_graph: DiGraph, + + /// To keep track of where each DependencyId is in `dependency_graph`, we need + /// this separate graph to map between the ids and indices. + dependency_graph_indices: HashMap, + // Map each `Index` to it's own location pub(crate) id_to_location: HashMap, @@ -59,13 +75,14 @@ pub struct NodeInterner { // Type checking map // - // Notice that we use `Index` as the Key and not an ExprId or IdentId - // Therefore, If a raw index is passed in, then it is not safe to assume that it will have - // a Type, as not all Ids have types associated to them. - // Further note, that an ExprId and an IdentId will never have the same underlying Index - // Because we use one Arena to store all Definitions/Nodes + // This should only be used with indices from the `nodes` arena. + // Otherwise the indices used may overwrite other existing indices. + // Each type for each index is filled in during type checking. id_to_type: HashMap, + // Similar to `id_to_type` but maps definitions to their type + definition_to_type: HashMap, + // Struct map. // // Each struct definition is possibly shared across multiple type nodes. @@ -74,11 +91,12 @@ pub struct NodeInterner { structs: HashMap>, struct_attributes: HashMap, - // Type Aliases map. + + // Maps TypeAliasId -> Shared // // Map type aliases to the actual type. // When resolving types, check against this map to see if a type alias is defined. - pub(crate) type_aliases: Vec, + pub(crate) type_aliases: Vec>, // Trait map. // @@ -125,7 +143,9 @@ pub struct NodeInterner { /// checking. 
field_indices: HashMap, - globals: HashMap, // NOTE: currently only used for checking repeat globals and restricting their scope to a module + // Maps GlobalId -> GlobalInfo + // NOTE: currently only used for checking repeat globals and restricting their scope to a module + globals: Vec, next_type_variable_id: std::cell::Cell, @@ -151,6 +171,24 @@ pub struct NodeInterner { pub(crate) type_ref_locations: Vec<(Type, Location)>, } +/// A dependency in the dependency graph may be a type or a definition. +/// Types can depend on definitions too. E.g. `Foo` depends on `COUNT` in: +/// +/// ```struct +/// global COUNT = 3; +/// +/// struct Foo { +/// array: [Field; COUNT], +/// } +/// ``` +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum DependencyId { + Struct(StructId), + Global(GlobalId), + Function(FuncId), + Alias(TypeAliasId), +} + /// A trait implementation is either a normal implementation that is present in the source /// program via an `impl` block, or it is assumed to exist from a `where` clause or similar. #[derive(Debug, Clone)] @@ -240,9 +278,14 @@ impl DefinitionId { } } -impl From for Index { - fn from(id: DefinitionId) -> Self { - Index::from_raw_parts(id.0, u64::MAX) +/// An ID for a global value +#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)] +pub struct GlobalId(usize); + +impl GlobalId { + // Dummy id for error reporting + pub fn dummy_id() -> Self { + GlobalId(std::usize::MAX) } } @@ -254,7 +297,7 @@ impl StmtId { // This can be anything, as the program will ultimately fail // after resolution pub fn dummy_id() -> StmtId { - StmtId(Index::from_raw_parts(std::usize::MAX, 0)) + StmtId(Index::dummy()) } } @@ -263,7 +306,7 @@ pub struct ExprId(Index); impl ExprId { pub fn empty_block_id() -> ExprId { - ExprId(Index::from_raw_parts(0, 0)) + ExprId(Index::unsafe_zeroed()) } } #[derive(Debug, Eq, PartialEq, Hash, Copy, Clone)] @@ -274,7 +317,7 @@ impl FuncId { // This can be anything, as the program will ultimately fail // after resolution pub fn dummy_id() -> FuncId { - FuncId(Index::from_raw_parts(std::usize::MAX, 0)) + FuncId(Index::dummy()) } } @@ -324,7 +367,7 @@ impl TraitId { } #[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)] -pub struct TraitImplId(usize); +pub struct TraitImplId(pub usize); #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct TraitMethodId { @@ -348,23 +391,9 @@ macro_rules! into_index { }; } -macro_rules! 
partialeq { - ($id_type:ty) => { - impl PartialEq for &$id_type { - fn eq(&self, other: &usize) -> bool { - let (index, _) = self.0.into_raw_parts(); - index == *other - } - } - }; -} - into_index!(ExprId); into_index!(StmtId); -partialeq!(ExprId); -partialeq!(StmtId); - /// A Definition enum specifies anything that we can intern in the NodeInterner /// We use one Arena for all types that can be interned as that has better cache locality /// This data structure is never accessed directly, so API wise there is no difference between using @@ -396,7 +425,7 @@ impl DefinitionInfo { pub enum DefinitionKind { Function(FuncId), - Global(ExprId), + Global(GlobalId), /// Locals may be defined in let statements or parameters, /// in which case they will not have an associated ExprId @@ -417,7 +446,7 @@ impl DefinitionKind { pub fn get_rhs(&self) -> Option { match self { DefinitionKind::Function(_) => None, - DefinitionKind::Global(id) => Some(*id), + DefinitionKind::Global(_) => None, DefinitionKind::Local(id) => *id, DefinitionKind::GenericType(_) => None, } @@ -426,8 +455,12 @@ impl DefinitionKind { #[derive(Debug, Clone)] pub struct GlobalInfo { + pub id: GlobalId, + pub definition_id: DefinitionId, pub ident: Ident, pub local_id: LocalModuleId, + pub location: Location, + pub let_statement: StmtId, } impl Default for NodeInterner { @@ -439,9 +472,12 @@ impl Default for NodeInterner { function_modifiers: HashMap::new(), function_modules: HashMap::new(), func_id_to_trait: HashMap::new(), + dependency_graph: petgraph::graph::DiGraph::new(), + dependency_graph_indices: HashMap::new(), id_to_location: HashMap::new(), definitions: vec![], id_to_type: HashMap::new(), + definition_to_type: HashMap::new(), structs: HashMap::new(), struct_attributes: HashMap::new(), type_aliases: Vec::new(), @@ -454,7 +490,7 @@ impl Default for NodeInterner { instantiation_bindings: HashMap::new(), field_indices: HashMap::new(), next_type_variable_id: std::cell::Cell::new(0), - globals: HashMap::new(), + globals: Vec::new(), struct_methods: HashMap::new(), primitive_methods: HashMap::new(), type_alias_ref: Vec::new(), @@ -491,10 +527,15 @@ impl NodeInterner { } /// Store the type for an interned expression - pub fn push_expr_type(&mut self, expr_id: &ExprId, typ: Type) { + pub fn push_expr_type(&mut self, expr_id: ExprId, typ: Type) { self.id_to_type.insert(expr_id.into(), typ); } + /// Store the type for an interned expression + pub fn push_definition_type(&mut self, definition_id: DefinitionId, typ: Type) { + self.definition_to_type.insert(definition_id, typ); + } + pub fn push_empty_trait(&mut self, type_id: TraitId, unresolved_trait: &UnresolvedTrait) { let self_type_typevar_id = self.next_type_variable_id(); @@ -551,13 +592,13 @@ impl NodeInterner { pub fn push_type_alias(&mut self, typ: &UnresolvedTypeAlias) -> TypeAliasId { let type_id = TypeAliasId(self.type_aliases.len()); - self.type_aliases.push(TypeAliasType::new( + self.type_aliases.push(Shared::new(TypeAlias::new( type_id, typ.type_alias_def.name.clone(), Location::new(typ.type_alias_def.span, typ.file_id), Type::Error, vecmap(&typ.type_alias_def.generics, |_| TypeVariable::unbound(TypeVariableId(0))), - )); + ))); type_id } @@ -579,7 +620,7 @@ impl NodeInterner { pub fn set_type_alias(&mut self, type_id: TypeAliasId, typ: Type, generics: Generics) { let type_alias_type = &mut self.type_aliases[type_id.0]; - type_alias_type.set_type_and_generics(typ, generics); + type_alias_type.borrow_mut().set_type_and_generics(typ, generics); } /// Returns the interned 
statement corresponding to `stmt_id` @@ -606,36 +647,46 @@ impl NodeInterner { } } - /// Store the type for an interned Identifier - pub fn push_definition_type(&mut self, definition_id: DefinitionId, typ: Type) { - self.id_to_type.insert(definition_id.into(), typ); - } - /// Store [Location] of [Type] reference pub fn push_type_ref_location(&mut self, typ: Type, location: Location) { self.type_ref_locations.push((typ, location)); } - pub fn push_global(&mut self, stmt_id: StmtId, ident: Ident, local_id: LocalModuleId) { - self.globals.insert(stmt_id, GlobalInfo { ident, local_id }); + fn push_global( + &mut self, + ident: Ident, + local_id: LocalModuleId, + let_statement: StmtId, + file: FileId, + ) -> GlobalId { + let id = GlobalId(self.globals.len()); + let location = Location::new(ident.span(), file); + let name = ident.to_string(); + let definition_id = self.push_definition(name, false, DefinitionKind::Global(id), location); + self.globals.push(GlobalInfo { + id, + definition_id, + ident, + local_id, + let_statement, + location, + }); + id } - /// Intern an empty global stmt. Used for collecting globals - pub fn push_empty_global(&mut self) -> StmtId { - self.push_stmt(HirStatement::Error) + pub fn next_global_id(&mut self) -> GlobalId { + GlobalId(self.globals.len()) } - pub fn update_global(&mut self, stmt_id: StmtId, hir_stmt: HirStatement) { - let def = - self.nodes.get_mut(stmt_id.0).expect("ice: all function ids should have definitions"); - - let stmt = match def { - Node::Statement(stmt) => stmt, - _ => { - panic!("ice: all global ids should correspond to a statement in the interner") - } - }; - *stmt = hir_stmt; + /// Intern an empty global. Used for collecting globals before they're defined + pub fn push_empty_global( + &mut self, + name: Ident, + local_id: LocalModuleId, + file: FileId, + ) -> GlobalId { + let statement = self.push_stmt(HirStatement::Error); + self.push_global(name, local_id, statement, file) } /// Intern an empty function. 
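Note on the new `dependency_graph` machinery introduced in this file: the `check_for_dependency_cycles` pass added further down runs Tarjan's strongly-connected-components algorithm over the petgraph `DiGraph` and reports any component with more than one node as a cycle, rendering it as `Foo -> COUNT -> Foo`. The standalone sketch below (not part of this diff) illustrates the same technique with hypothetical item names; only the petgraph calls mirror the real code.

```rust
use petgraph::algo::tarjan_scc;
use petgraph::graph::DiGraph;

fn main() {
    // Directed graph of item names; an edge A -> B means "A depends on B".
    let mut graph: DiGraph<&str, ()> = DiGraph::new();

    let foo = graph.add_node("Foo");
    let count = graph.add_node("COUNT");
    let main_fn = graph.add_node("main");

    // `Foo` uses `COUNT` in a field type, and `COUNT` (erroneously) refers back to `Foo`.
    graph.update_edge(foo, count, ());
    graph.update_edge(count, foo, ());
    // `main` uses `Foo` but is not part of any cycle.
    graph.update_edge(main_fn, foo, ());

    // Every strongly connected component with more than one node is a dependency cycle.
    for scc in tarjan_scc(&graph) {
        if scc.len() > 1 {
            let mut names: Vec<&str> = scc.iter().map(|ix| graph[*ix]).collect();
            // Close the loop so the message reads like `Foo -> COUNT -> Foo`.
            names.push(names[0]);
            println!("dependency cycle: {}", names.join(" -> "));
        }
    }
}
```

As in the real pass, cycles consisting only of functions would be skipped, since mutual recursion between functions is legal.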
@@ -815,19 +866,19 @@ impl NodeInterner { } } - /// Returns the interned let statement corresponding to `stmt_id` - pub fn let_statement(&self, stmt_id: &StmtId) -> HirLetStatement { - let def = - self.nodes.get(stmt_id.0).expect("ice: all statement ids should have definitions"); + /// Try to get the `HirLetStatement` which defines a given global value + pub fn get_global_let_statement(&self, global: GlobalId) -> Option { + let global = self.get_global(global); + let def = self.nodes.get(global.let_statement.0)?; match def { - Node::Statement(hir_stmt) => { - match hir_stmt { - HirStatement::Let(let_stmt) => let_stmt.clone(), - _ => panic!("ice: all let statement ids should correspond to a let statement in the interner"), + Node::Statement(hir_stmt) => match hir_stmt { + HirStatement::Let(let_stmt) => Some(let_stmt.clone()), + _ => { + panic!("ice: all globals should correspond to a let statement in the interner") } }, - _ => panic!("ice: all statement ids should correspond to a statement in the interner"), + _ => panic!("ice: all globals should correspond to a statement in the interner"), } } @@ -889,16 +940,21 @@ impl NodeInterner { self.traits.get(&id) } - pub fn get_type_alias(&self, id: TypeAliasId) -> &TypeAliasType { - &self.type_aliases[id.0] + pub fn get_type_alias(&self, id: TypeAliasId) -> Shared { + self.type_aliases[id.0].clone() + } + + pub fn get_global(&self, global_id: GlobalId) -> &GlobalInfo { + &self.globals[global_id.0] } - pub fn get_global(&self, stmt_id: &StmtId) -> Option { - self.globals.get(stmt_id).cloned() + pub fn get_global_definition(&self, global_id: GlobalId) -> &DefinitionInfo { + let global = self.get_global(global_id); + self.definition(global.definition_id) } - pub fn get_all_globals(&self) -> HashMap { - self.globals.clone() + pub fn get_all_globals(&self) -> &[GlobalInfo] { + &self.globals } /// Returns the type of an item stored in the Interner or Error if it was not found. @@ -906,8 +962,13 @@ impl NodeInterner { self.id_to_type.get(&index.into()).cloned().unwrap_or(Type::Error) } + /// Returns the type of the definition or `Type::Error` if it was not found. + pub fn definition_type(&self, id: DefinitionId) -> Type { + self.definition_to_type.get(&id).cloned().unwrap_or(Type::Error) + } + pub fn id_type_substitute_trait_as_type(&self, def_id: DefinitionId) -> Type { - let typ = self.id_type(def_id); + let typ = self.definition_type(def_id); if let Type::Function(args, ret, env) = &typ { let def = self.definition(def_id); if let Type::TraitAsType(..) = ret.as_ref() { @@ -934,6 +995,12 @@ impl NodeInterner { *old = Node::Expression(new); } + /// Replaces the HirStatement at the given StmtId with a new HirStatement + pub fn replace_statement(&mut self, stmt_id: StmtId, hir_stmt: HirStatement) { + let old = self.nodes.get_mut(stmt_id.0).unwrap(); + *old = Node::Statement(hir_stmt); + } + pub fn next_type_variable_id(&self) -> TypeVariableId { let id = self.next_type_variable_id.get(); self.next_type_variable_id.set(id + 1); @@ -1042,6 +1109,34 @@ impl NodeInterner { Ok(impl_kind) } + /// Given a `ObjectType: TraitId` pair, find all implementations without taking constraints into account or + /// applying any type bindings. Useful to look for a specific trait in a type that is used in a macro. 
+ pub fn lookup_all_trait_implementations( + &self, + object_type: &Type, + trait_id: TraitId, + ) -> Vec<&TraitImplKind> { + let trait_impl = self.trait_implementation_map.get(&trait_id); + + trait_impl + .map(|trait_impl| { + trait_impl + .iter() + .filter_map(|(typ, impl_kind)| match &typ { + Type::Forall(_, typ) => { + if typ.deref() == object_type { + Some(impl_kind) + } else { + None + } + } + _ => None, + }) + .collect() + }) + .unwrap_or_default() + } + /// Similar to `lookup_trait_implementation` but does not apply any type bindings on success. pub fn try_lookup_trait_implementation( &self, @@ -1263,7 +1358,6 @@ impl NodeInterner { force_type_check: bool, ) -> Option { let methods = self.struct_methods.get(&(id, method_name.to_owned())); - // If there is only one method, just return it immediately. // It will still be typechecked later. if !force_type_check { @@ -1418,6 +1512,115 @@ impl NodeInterner { pub(crate) fn ordering_type(&self) -> Type { self.ordering_type.clone().expect("Expected ordering_type to be set in the NodeInterner") } + + /// Register that `dependent` depends on `dependency`. + /// This is usually because `dependent` refers to `dependency` in one of its struct fields. + pub fn add_type_dependency(&mut self, dependent: DependencyId, dependency: StructId) { + self.add_dependency(dependent, DependencyId::Struct(dependency)); + } + + pub fn add_global_dependency(&mut self, dependent: DependencyId, dependency: GlobalId) { + self.add_dependency(dependent, DependencyId::Global(dependency)); + } + + pub fn add_function_dependency(&mut self, dependent: DependencyId, dependency: FuncId) { + self.add_dependency(dependent, DependencyId::Function(dependency)); + } + + pub fn add_type_alias_dependency(&mut self, dependent: DependencyId, dependency: TypeAliasId) { + self.add_dependency(dependent, DependencyId::Alias(dependency)); + } + + fn add_dependency(&mut self, dependent: DependencyId, dependency: DependencyId) { + let dependent_index = self.get_or_insert_dependency(dependent); + let dependency_index = self.get_or_insert_dependency(dependency); + self.dependency_graph.update_edge(dependent_index, dependency_index, ()); + } + + fn get_or_insert_dependency(&mut self, id: DependencyId) -> PetGraphIndex { + if let Some(index) = self.dependency_graph_indices.get(&id) { + return *index; + } + + let index = self.dependency_graph.add_node(id); + self.dependency_graph_indices.insert(id, index); + index + } + + pub(crate) fn check_for_dependency_cycles(&self) -> Vec<(CompilationError, FileId)> { + let strongly_connected_components = tarjan_scc(&self.dependency_graph); + let mut errors = Vec::new(); + + let mut push_error = |item: String, scc: &[_], i, location: Location| { + let cycle = self.get_cycle_error_string(scc, i); + let span = location.span; + let error = ResolverError::DependencyCycle { item, cycle, span }; + errors.push((error.into(), location.file)); + }; + + for scc in strongly_connected_components { + if scc.len() > 1 { + // If a SCC contains a type, type alias, or global, it must be the only element in the SCC + for (i, index) in scc.iter().enumerate() { + match self.dependency_graph[*index] { + DependencyId::Struct(struct_id) => { + let struct_type = self.get_struct(struct_id); + let struct_type = struct_type.borrow(); + push_error(struct_type.name.to_string(), &scc, i, struct_type.location); + break; + } + DependencyId::Global(global_id) => { + let global = self.get_global(global_id); + let name = global.ident.to_string(); + push_error(name, &scc, i, 
global.location); + break; + } + DependencyId::Alias(alias_id) => { + let alias = self.get_type_alias(alias_id); + // If type aliases form a cycle, we have to manually break the cycle + // here to prevent infinite recursion in the type checker. + alias.borrow_mut().typ = Type::Error; + + // push_error will borrow the alias so we have to drop the mutable borrow + let alias = alias.borrow(); + push_error(alias.name.to_string(), &scc, i, alias.location); + break; + } + // Mutually recursive functions are allowed + DependencyId::Function(_) => (), + } + } + } + } + + errors + } + + /// Build up a string starting from the given item containing each item in the dependency + /// cycle. The final result will resemble `foo -> bar -> baz -> foo`, always going back to the + /// element at the given start index. + fn get_cycle_error_string(&self, scc: &[PetGraphIndex], start_index: usize) -> String { + let index_to_string = |index: PetGraphIndex| match self.dependency_graph[index] { + DependencyId::Struct(id) => Cow::Owned(self.get_struct(id).borrow().name.to_string()), + DependencyId::Function(id) => Cow::Borrowed(self.function_name(&id)), + DependencyId::Alias(id) => { + Cow::Owned(self.get_type_alias(id).borrow().name.to_string()) + } + DependencyId::Global(id) => { + Cow::Borrowed(self.get_global(id).ident.0.contents.as_ref()) + } + }; + + let mut cycle = index_to_string(scc[start_index]).to_string(); + + // Reversing the dependencies here matches the order users would expect for the error message + for i in (0..scc.len()).rev() { + cycle += " -> "; + cycle += &index_to_string(scc[(start_index + i) % scc.len()]); + } + + cycle + } } impl Methods { @@ -1454,7 +1657,7 @@ impl Methods { for method in self.iter() { match interner.function_meta(&method).typ.instantiate(interner).0 { Type::Function(args, _, _) => { - if let Some(object) = args.get(0) { + if let Some(object) = args.first() { let mut bindings = TypeBindings::new(); if object.try_unify(typ, &mut bindings).is_ok() { @@ -1495,6 +1698,7 @@ fn get_type_method_key(typ: &Type) -> Option { Type::Array(_, _) => Some(Array), Type::Integer(_, _) => Some(FieldOrInt), Type::TypeVariable(_, TypeVariableKind::IntegerOrField) => Some(FieldOrInt), + Type::TypeVariable(_, TypeVariableKind::Integer) => Some(FieldOrInt), Type::Bool => Some(Bool), Type::String(_) => Some(String), Type::FmtString(_, _) => Some(FmtString), @@ -1503,6 +1707,7 @@ fn get_type_method_key(typ: &Type) -> Option { Type::Function(_, _, _) => Some(Function), Type::NamedGeneric(_, _) => Some(Generic), Type::MutableReference(element) => get_type_method_key(element), + Type::Alias(alias, _) => get_type_method_key(&alias.borrow().typ), // We do not support adding methods to these types Type::TypeVariable(_, _) diff --git a/compiler/noirc_frontend/src/parser/errors.rs b/compiler/noirc_frontend/src/parser/errors.rs index 5c869ff4719..43a1f96f13f 100644 --- a/compiler/noirc_frontend/src/parser/errors.rs +++ b/compiler/noirc_frontend/src/parser/errors.rs @@ -1,6 +1,7 @@ use crate::lexer::errors::LexerErrorKind; use crate::lexer::token::Token; use crate::Expression; +use crate::IntegerBitSize; use small_ord_set::SmallOrdSet; use thiserror::Error; @@ -40,6 +41,8 @@ pub enum ParserErrorReason { NoFunctionAttributesAllowedOnStruct, #[error("Assert statements can only accept string literals")] AssertMessageNotString, + #[error("Integer bit size {0} isn't supported")] + InvalidBitSize(u32), #[error("{0}")] Lexer(LexerErrorKind), } @@ -145,6 +148,11 @@ impl From for Diagnostic { "The 'comptime' 
keyword has been deprecated. It can be removed without affecting your program".into(), error.span, ), + ParserErrorReason::InvalidBitSize(bit_size) => Diagnostic::simple_error( + format!("Use of invalid bit size {}", bit_size), + format!("Allowed bit sizes for integers are {}", IntegerBitSize::allowed_sizes().iter().map(|n| n.to_string()).collect::>().join(", ")), + error.span, + ), ParserErrorReason::ExperimentalFeature(_) => Diagnostic::simple_warning( reason.to_string(), "".into(), diff --git a/compiler/noirc_frontend/src/parser/parser.rs b/compiler/noirc_frontend/src/parser/parser.rs index f82ce95c718..75f4a6359bf 100644 --- a/compiler/noirc_frontend/src/parser/parser.rs +++ b/compiler/noirc_frontend/src/parser/parser.rs @@ -23,6 +23,8 @@ //! prevent other parsers from being tried afterward since there is no longer an error. Thus, they should //! be limited to cases like the above `fn` example where it is clear we shouldn't back out of the //! current parser to try alternative parsers in a `choice` expression. +use self::primitives::{keyword, mutable_reference, variable}; + use super::{ foldl_with_span, labels::ParsingRuleLabel, parameter_name_recovery, parameter_recovery, parenthesized, then_commit, then_commit_ignore, top_level_statement_recovery, ExprParser, @@ -35,13 +37,11 @@ use crate::ast::{ }; use crate::lexer::Lexer; use crate::parser::{force, ignore_then_commit, statement_recovery}; -use crate::token::{Attribute, Attributes, Keyword, SecondaryAttribute, Token, TokenKind}; +use crate::token::{Keyword, Token, TokenKind}; use crate::{ - BinaryOp, BinaryOpKind, BlockExpression, ConstrainKind, ConstrainStatement, Distinctness, - ForLoopStatement, ForRange, FunctionDefinition, FunctionReturnType, FunctionVisibility, Ident, - IfExpression, InfixExpression, LValue, Lambda, Literal, NoirFunction, NoirStruct, NoirTrait, - NoirTraitImpl, NoirTypeAlias, Param, Path, PathKind, Pattern, Recoverable, Statement, - TraitBound, TraitImplItem, TraitItem, TypeImpl, UnaryOp, UnresolvedTraitConstraint, + BinaryOp, BinaryOpKind, BlockExpression, Distinctness, ForLoopStatement, ForRange, + FunctionReturnType, Ident, IfExpression, InfixExpression, LValue, Literal, NoirTypeAlias, + Param, Path, Pattern, Recoverable, Statement, TraitBound, TypeImpl, UnresolvedTraitConstraint, UnresolvedTypeExpression, UseTree, UseTreeKind, Visibility, }; @@ -49,6 +49,23 @@ use chumsky::prelude::*; use iter_extended::vecmap; use noirc_errors::{Span, Spanned}; +mod assertion; +mod attributes; +mod function; +mod lambdas; +mod literals; +mod path; +mod primitives; +mod structs; +mod traits; + +#[cfg(test)] +mod test_helpers; + +use literals::literal; +use path::{maybe_empty_path, path}; +use primitives::{dereference, ident, negation, not, nothing, right_shift_operator, token_kind}; + /// Entry function for the parser - also handles lexing internally. 
/// /// Given a source_program string, return the ParsedModule Ast representation @@ -109,10 +126,10 @@ fn top_level_statement( module_parser: impl NoirParser, ) -> impl NoirParser { choice(( - function_definition(false).map(TopLevelStatement::Function), - struct_definition(), - trait_definition(), - trait_implementation(), + function::function_definition(false).map(TopLevelStatement::Function), + structs::struct_definition(), + traits::trait_definition(), + traits::trait_implementation(), implementation(), type_alias_definition().then_ignore(force(just(Token::Semicolon))), submodule(module_parser.clone()), @@ -124,6 +141,21 @@ fn top_level_statement( .recover_via(top_level_statement_recovery()) } +/// Parses a non-trait implementation, adding a set of methods to a type. +/// +/// implementation: 'impl' generics type '{' function_definition ... '}' +fn implementation() -> impl NoirParser { + keyword(Keyword::Impl) + .ignore_then(function::generics()) + .then(parse_type().map_with_span(|typ, span| (typ, span))) + .then_ignore(just(Token::LeftBrace)) + .then(spanned(function::function_definition(true)).repeated()) + .then_ignore(just(Token::RightBrace)) + .map(|((generics, (object_type, type_span)), methods)| { + TopLevelStatement::Impl(TypeImpl { generics, object_type, type_span, methods }) + }) +} + /// global_declaration: 'global' ident global_type_annotation '=' literal fn global_declaration() -> impl NoirParser { let p = ignore_then_commit( @@ -132,7 +164,7 @@ fn global_declaration() -> impl NoirParser { ); let p = then_commit(p, optional_type_annotation()); let p = then_commit_ignore(p, just(Token::Assign)); - let p = then_commit(p, literal_or_collection(expression()).map_with_span(Expression::new)); + let p = then_commit(p, expression()); p.map(LetStatement::new_let).map(TopLevelStatement::Global) } @@ -160,121 +192,11 @@ fn contract(module_parser: impl NoirParser) -> impl NoirParser impl NoirParser { - attributes() - .then(function_modifiers()) - .then_ignore(keyword(Keyword::Fn)) - .then(ident()) - .then(generics()) - .then(parenthesized(function_parameters(allow_self))) - .then(function_return_type()) - .then(where_clause()) - .then(spanned(block(fresh_statement()))) - .validate(|(((args, ret), where_clause), (body, body_span)), span, emit| { - let ((((attributes, modifiers), name), generics), parameters) = args; - - // Validate collected attributes, filtering them into function and secondary variants - let attributes = validate_attributes(attributes, span, emit); - FunctionDefinition { - span: body_span, - name, - attributes, - is_unconstrained: modifiers.0, - is_open: modifiers.2, - is_internal: modifiers.3, - visibility: if modifiers.1 { - FunctionVisibility::PublicCrate - } else if modifiers.4 { - FunctionVisibility::Public - } else { - FunctionVisibility::Private - }, - generics, - parameters, - body, - where_clause, - return_type: ret.1, - return_visibility: ret.0 .1, - return_distinctness: ret.0 .0, - } - .into() - }) -} - -/// function_modifiers: 'unconstrained'? 'pub(crate)'? 'pub'? 'open'? 'internal'? 
-/// -/// returns (is_unconstrained, is_pub_crate, is_open, is_internal, is_pub) for whether each keyword was present -fn function_modifiers() -> impl NoirParser<(bool, bool, bool, bool, bool)> { - keyword(Keyword::Unconstrained) - .or_not() - .then(is_pub_crate()) - .then(keyword(Keyword::Pub).or_not()) - .then(keyword(Keyword::Open).or_not()) - .then(keyword(Keyword::Internal).or_not()) - .map(|((((unconstrained, pub_crate), public), open), internal)| { - ( - unconstrained.is_some(), - pub_crate, - open.is_some(), - internal.is_some(), - public.is_some(), - ) - }) -} - -fn is_pub_crate() -> impl NoirParser { - (keyword(Keyword::Pub) - .then_ignore(just(Token::LeftParen)) - .then_ignore(keyword(Keyword::Crate)) - .then_ignore(just(Token::RightParen))) - .or_not() - .map(|a| a.is_some()) -} - -/// non_empty_ident_list: ident ',' non_empty_ident_list -/// | ident -/// -/// generics: '<' non_empty_ident_list '>' -/// | %empty -fn generics() -> impl NoirParser> { - ident() - .separated_by(just(Token::Comma)) - .allow_trailing() - .at_least(1) - .delimited_by(just(Token::Less), just(Token::Greater)) - .or_not() - .map(|opt| opt.unwrap_or_default()) -} - -fn struct_definition() -> impl NoirParser { - use self::Keyword::Struct; - use Token::*; - - let fields = struct_fields() - .delimited_by(just(LeftBrace), just(RightBrace)) - .recover_with(nested_delimiters( - LeftBrace, - RightBrace, - [(LeftParen, RightParen), (LeftBracket, RightBracket)], - |_| vec![], - )) - .or(just(Semicolon).to(Vec::new())); - - attributes().then_ignore(keyword(Struct)).then(ident()).then(generics()).then(fields).validate( - |(((raw_attributes, name), generics), fields), span, emit| { - let attributes = validate_struct_attributes(raw_attributes, span, emit); - TopLevelStatement::Struct(NoirStruct { name, attributes, generics, fields, span }) - }, - ) -} - fn type_alias_definition() -> impl NoirParser { use self::Keyword::Type; let p = ignore_then_commit(keyword(Type), ident()); - let p = then_commit(p, generics()); + let p = then_commit(p, function::generics()); let p = then_commit_ignore(p, just(Token::Assign)); let p = then_commit(p, parse_type()); @@ -283,13 +205,6 @@ fn type_alias_definition() -> impl NoirParser { }) } -fn lambda_return_type() -> impl NoirParser { - just(Token::Arrow) - .ignore_then(parse_type()) - .or_not() - .map(|ret| ret.unwrap_or_else(UnresolvedType::unspecified)) -} - fn function_return_type() -> impl NoirParser<((Distinctness, Visibility), FunctionReturnType)> { just(Token::Arrow) .ignore_then(optional_distinctness()) @@ -305,69 +220,6 @@ fn function_return_type() -> impl NoirParser<((Distinctness, Visibility), Functi }) } -fn attribute() -> impl NoirParser { - token_kind(TokenKind::Attribute).map(|token| match token { - Token::Attribute(attribute) => attribute, - _ => unreachable!("Parser should have already errored due to token not being an attribute"), - }) -} - -fn attributes() -> impl NoirParser> { - attribute().repeated() -} - -fn struct_fields() -> impl NoirParser> { - ident() - .then_ignore(just(Token::Colon)) - .then(parse_type()) - .separated_by(just(Token::Comma)) - .allow_trailing() -} - -fn lambda_parameters() -> impl NoirParser> { - let typ = parse_type().recover_via(parameter_recovery()); - let typ = just(Token::Colon).ignore_then(typ); - - let parameter = pattern() - .recover_via(parameter_name_recovery()) - .then(typ.or_not().map(|typ| typ.unwrap_or_else(UnresolvedType::unspecified))); - - parameter - .separated_by(just(Token::Comma)) - .allow_trailing() - 
.labelled(ParsingRuleLabel::Parameter) -} - -fn function_parameters<'a>(allow_self: bool) -> impl NoirParser> + 'a { - let typ = parse_type().recover_via(parameter_recovery()); - - let full_parameter = pattern() - .recover_via(parameter_name_recovery()) - .then_ignore(just(Token::Colon)) - .then(optional_visibility()) - .then(typ) - .map_with_span(|((pattern, visibility), typ), span| Param { - visibility, - pattern, - typ, - span, - }); - - let self_parameter = if allow_self { self_parameter().boxed() } else { nothing().boxed() }; - - let parameter = full_parameter.or(self_parameter); - - parameter - .separated_by(just(Token::Comma)) - .allow_trailing() - .labelled(ParsingRuleLabel::Parameter) -} - -/// This parser always parses no input and fails -fn nothing() -> impl NoirParser { - one_of([]).map(|_| unreachable!("parser should always error")) -} - fn self_parameter() -> impl NoirParser { let mut_ref_pattern = just(Token::Ampersand).then_ignore(keyword(Keyword::Mut)); let mut_pattern = keyword(Keyword::Mut); @@ -380,132 +232,27 @@ fn self_parameter() -> impl NoirParser { Token::Ident(ref word) if word == "self" => Ok(span), _ => Err(ParserError::expected_label(ParsingRuleLabel::Parameter, found, span)), })) - .map(|(pattern_keyword, span)| { - let ident = Ident::new("self".to_string(), span); - let path = Path::from_single("Self".to_owned(), span); - let mut self_type = UnresolvedTypeData::Named(path, vec![]).with_span(span); + .map(|(pattern_keyword, ident_span)| { + let ident = Ident::new("self".to_string(), ident_span); + let path = Path::from_single("Self".to_owned(), ident_span); + let mut self_type = UnresolvedTypeData::Named(path, vec![], true).with_span(ident_span); let mut pattern = Pattern::Identifier(ident); match pattern_keyword { Some((Token::Ampersand, _)) => { - self_type = - UnresolvedTypeData::MutableReference(Box::new(self_type)).with_span(span); + self_type = UnresolvedTypeData::MutableReference(Box::new(self_type)) + .with_span(ident_span); } Some((Token::Keyword(_), span)) => { - pattern = Pattern::Mutable(Box::new(pattern), span); + pattern = Pattern::Mutable(Box::new(pattern), span.merge(ident_span), true); } _ => (), } - Param { pattern, typ: self_type, visibility: Visibility::Private, span } - }) -} - -fn trait_definition() -> impl NoirParser { - keyword(Keyword::Trait) - .ignore_then(ident()) - .then(generics()) - .then(where_clause()) - .then_ignore(just(Token::LeftBrace)) - .then(trait_body()) - .then_ignore(just(Token::RightBrace)) - .map_with_span(|(((name, generics), where_clause), items), span| { - TopLevelStatement::Trait(NoirTrait { name, generics, where_clause, span, items }) - }) -} - -fn trait_body() -> impl NoirParser> { - trait_function_declaration() - .or(trait_type_declaration()) - .or(trait_constant_declaration()) - .repeated() -} - -fn optional_default_value() -> impl NoirParser> { - ignore_then_commit(just(Token::Assign), expression()).or_not() -} - -fn trait_constant_declaration() -> impl NoirParser { - keyword(Keyword::Let) - .ignore_then(ident()) - .then_ignore(just(Token::Colon)) - .then(parse_type()) - .then(optional_default_value()) - .then_ignore(just(Token::Semicolon)) - .validate(|((name, typ), default_value), span, emit| { - emit(ParserError::with_reason( - ParserErrorReason::ExperimentalFeature("Associated constants"), - span, - )); - TraitItem::Constant { name, typ, default_value } - }) -} - -/// trait_function_declaration: 'fn' ident generics '(' declaration_parameters ')' function_return_type -fn trait_function_declaration() -> 
impl NoirParser { - let trait_function_body_or_semicolon = - block(fresh_statement()).map(Option::from).or(just(Token::Semicolon).to(Option::None)); - - keyword(Keyword::Fn) - .ignore_then(ident()) - .then(generics()) - .then(parenthesized(function_declaration_parameters())) - .then(function_return_type().map(|(_, typ)| typ)) - .then(where_clause()) - .then(trait_function_body_or_semicolon) - .map(|(((((name, generics), parameters), return_type), where_clause), body)| { - TraitItem::Function { name, generics, parameters, return_type, where_clause, body } + Param { span: pattern.span(), pattern, typ: self_type, visibility: Visibility::Private } }) } -fn validate_attributes( - attributes: Vec, - span: Span, - emit: &mut dyn FnMut(ParserError), -) -> Attributes { - let mut primary = None; - let mut secondary = Vec::new(); - - for attribute in attributes { - match attribute { - Attribute::Function(attr) => { - if primary.is_some() { - emit(ParserError::with_reason( - ParserErrorReason::MultipleFunctionAttributesFound, - span, - )); - } - primary = Some(attr); - } - Attribute::Secondary(attr) => secondary.push(attr), - } - } - - Attributes { function: primary, secondary } -} - -fn validate_struct_attributes( - attributes: Vec, - span: Span, - emit: &mut dyn FnMut(ParserError), -) -> Vec { - let mut struct_attributes = vec![]; - - for attribute in attributes { - match attribute { - Attribute::Function(..) => { - emit(ParserError::with_reason( - ParserErrorReason::NoFunctionAttributesAllowedOnStruct, - span, - )); - } - Attribute::Secondary(attr) => struct_attributes.push(attr), - } - } - - struct_attributes -} - /// Function declaration parameters differ from other parameters in that parameter /// patterns are not allowed in declarations. All parameters must be identifiers. fn function_declaration_parameters() -> impl NoirParser> { @@ -536,89 +283,6 @@ fn function_declaration_parameters() -> impl NoirParser impl NoirParser { - keyword(Keyword::Type).ignore_then(ident()).then_ignore(just(Token::Semicolon)).validate( - |name, span, emit| { - emit(ParserError::with_reason( - ParserErrorReason::ExperimentalFeature("Associated types"), - span, - )); - TraitItem::Type { name } - }, - ) -} - -/// Parses a non-trait implementation, adding a set of methods to a type. -/// -/// implementation: 'impl' generics type '{' function_definition ... '}' -fn implementation() -> impl NoirParser { - keyword(Keyword::Impl) - .ignore_then(generics()) - .then(parse_type().map_with_span(|typ, span| (typ, span))) - .then_ignore(just(Token::LeftBrace)) - .then(function_definition(true).repeated()) - .then_ignore(just(Token::RightBrace)) - .map(|((generics, (object_type, type_span)), methods)| { - TopLevelStatement::Impl(TypeImpl { generics, object_type, type_span, methods }) - }) -} - -/// Parses a trait implementation, implementing a particular trait for a type. -/// This has a similar syntax to `implementation`, but the `for type` clause is required, -/// and an optional `where` clause is also useable. 
-/// -/// trait_implementation: 'impl' generics ident generic_args for type '{' trait_implementation_body '}' -fn trait_implementation() -> impl NoirParser { - keyword(Keyword::Impl) - .ignore_then(generics()) - .then(path()) - .then(generic_type_args(parse_type())) - .then_ignore(keyword(Keyword::For)) - .then(parse_type()) - .then(where_clause()) - .then_ignore(just(Token::LeftBrace)) - .then(trait_implementation_body()) - .then_ignore(just(Token::RightBrace)) - .map(|args| { - let ((other_args, where_clause), items) = args; - let (((impl_generics, trait_name), trait_generics), object_type) = other_args; - - TopLevelStatement::TraitImpl(NoirTraitImpl { - impl_generics, - trait_name, - trait_generics, - object_type, - items, - where_clause, - }) - }) -} - -fn trait_implementation_body() -> impl NoirParser> { - let function = function_definition(true).validate(|mut f, span, emit| { - if f.def().is_internal - || f.def().is_unconstrained - || f.def().is_open - || f.def().visibility != FunctionVisibility::Private - { - emit(ParserError::with_reason(ParserErrorReason::TraitImplFunctionModifiers, span)); - } - // Trait impl functions are always public - f.def_mut().visibility = FunctionVisibility::Public; - TraitImplItem::Function(f) - }); - - let alias = keyword(Keyword::Type) - .ignore_then(ident()) - .then_ignore(just(Token::Assign)) - .then(parse_type()) - .then_ignore(just(Token::Semicolon)) - .map(|(name, alias)| TraitImplItem::Type { name, alias }); - - function.or(alias).repeated() -} - fn where_clause() -> impl NoirParser> { struct MultiTraitConstraint { typ: UnresolvedType, @@ -713,45 +377,6 @@ fn use_statement() -> impl NoirParser { keyword(Keyword::Use).ignore_then(use_tree()).map(TopLevelStatement::Import) } -fn keyword(keyword: Keyword) -> impl NoirParser { - just(Token::Keyword(keyword)) -} - -fn token_kind(token_kind: TokenKind) -> impl NoirParser { - filter_map(move |span, found: Token| { - if found.kind() == token_kind { - Ok(found) - } else { - Err(ParserError::expected_label( - ParsingRuleLabel::TokenKind(token_kind.clone()), - found, - span, - )) - } - }) -} - -fn path() -> impl NoirParser { - let idents = || ident().separated_by(just(Token::DoubleColon)).at_least(1); - let make_path = |kind| move |segments, span| Path { segments, kind, span }; - - let prefix = |key| keyword(key).ignore_then(just(Token::DoubleColon)); - let path_kind = |key, kind| prefix(key).ignore_then(idents()).map_with_span(make_path(kind)); - - choice(( - path_kind(Keyword::Crate, PathKind::Crate), - path_kind(Keyword::Dep, PathKind::Dep), - idents().map_with_span(make_path(PathKind::Plain)), - )) -} - -fn empty_path() -> impl NoirParser { - let make_path = |kind| move |_, span| Path { segments: Vec::new(), kind, span }; - let path_kind = |key, kind| keyword(key).map_with_span(make_path(kind)); - - choice((path_kind(Keyword::Crate, PathKind::Crate), path_kind(Keyword::Dep, PathKind::Dep))) -} - fn rename() -> impl NoirParser> { ignore_then_commit(keyword(Keyword::As), ident()).or_not() } @@ -764,7 +389,7 @@ fn use_tree() -> impl NoirParser { }); let list = { - let prefix = path().or(empty_path()).then_ignore(just(Token::DoubleColon)); + let prefix = maybe_empty_path().then_ignore(just(Token::DoubleColon)); let tree = use_tree .separated_by(just(Token::Comma)) .allow_trailing() @@ -778,10 +403,6 @@ fn use_tree() -> impl NoirParser { }) } -fn ident() -> impl NoirParser { - token_kind(TokenKind::Ident).map_with_span(Ident::from_token) -} - fn statement<'a, P, P2>( expr_parser: P, expr_no_constructors: P2, 
@@ -792,9 +413,9 @@ where { recursive(|statement| { choice(( - constrain(expr_parser.clone()), - assertion(expr_parser.clone()), - assertion_eq(expr_parser.clone()), + assertion::constrain(expr_parser.clone()), + assertion::assertion(expr_parser.clone()), + assertion::assertion_eq(expr_parser.clone()), declaration(expr_parser.clone()), assignment(expr_parser.clone()), for_loop(expr_no_constructors, statement), @@ -808,85 +429,6 @@ fn fresh_statement() -> impl NoirParser { statement(expression(), expression_no_constructors(expression())) } -fn constrain<'a, P>(expr_parser: P) -> impl NoirParser + 'a -where - P: ExprParser + 'a, -{ - ignore_then_commit( - keyword(Keyword::Constrain).labelled(ParsingRuleLabel::Statement), - expr_parser, - ) - .map(|expr| StatementKind::Constrain(ConstrainStatement(expr, None, ConstrainKind::Constrain))) - .validate(|expr, span, emit| { - emit(ParserError::with_reason(ParserErrorReason::ConstrainDeprecated, span)); - expr - }) -} - -fn assertion<'a, P>(expr_parser: P) -> impl NoirParser + 'a -where - P: ExprParser + 'a, -{ - let argument_parser = - expr_parser.separated_by(just(Token::Comma)).allow_trailing().at_least(1).at_most(2); - - ignore_then_commit(keyword(Keyword::Assert), parenthesized(argument_parser)) - .labelled(ParsingRuleLabel::Statement) - .validate(|expressions, span, emit| { - let condition = expressions.get(0).unwrap_or(&Expression::error(span)).clone(); - let mut message_str = None; - - if let Some(message) = expressions.get(1) { - if let ExpressionKind::Literal(Literal::Str(message)) = &message.kind { - message_str = Some(message.clone()); - } else { - emit(ParserError::with_reason(ParserErrorReason::AssertMessageNotString, span)); - } - } - - StatementKind::Constrain(ConstrainStatement( - condition, - message_str, - ConstrainKind::Assert, - )) - }) -} - -fn assertion_eq<'a, P>(expr_parser: P) -> impl NoirParser + 'a -where - P: ExprParser + 'a, -{ - let argument_parser = - expr_parser.separated_by(just(Token::Comma)).allow_trailing().at_least(2).at_most(3); - - ignore_then_commit(keyword(Keyword::AssertEq), parenthesized(argument_parser)) - .labelled(ParsingRuleLabel::Statement) - .validate(|exprs: Vec, span, emit| { - let predicate = Expression::new( - ExpressionKind::Infix(Box::new(InfixExpression { - lhs: exprs.get(0).unwrap_or(&Expression::error(span)).clone(), - rhs: exprs.get(1).unwrap_or(&Expression::error(span)).clone(), - operator: Spanned::from(span, BinaryOpKind::Equal), - })), - span, - ); - let mut message_str = None; - - if let Some(message) = exprs.get(2) { - if let ExpressionKind::Literal(Literal::Str(message)) = &message.kind { - message_str = Some(message.clone()); - } else { - emit(ParserError::with_reason(ParserErrorReason::AssertMessageNotString, span)); - } - } - StatementKind::Constrain(ConstrainStatement( - predicate, - message_str, - ConstrainKind::AssertEq, - )) - }) -} - fn declaration<'a, P>(expr_parser: P) -> impl NoirParser + 'a where P: ExprParser + 'a, @@ -914,7 +456,7 @@ fn pattern() -> impl NoirParser { let mut_pattern = keyword(Keyword::Mut) .ignore_then(pattern.clone()) - .map_with_span(|inner, span| Pattern::Mutable(Box::new(inner), span)); + .map_with_span(|inner, span| Pattern::Mutable(Box::new(inner), span, false)); let short_field = ident().map(|name| (name.clone(), Pattern::Identifier(name))); let long_field = ident().then_ignore(just(Token::Colon)).then(pattern.clone()); @@ -1113,13 +655,20 @@ fn int_type() -> impl NoirParser { Err(ParserError::expected_label(ParsingRuleLabel::IntegerType, 
unexpected, span)) } })) - .map_with_span(|(_, token), span| UnresolvedTypeData::from_int_token(token).with_span(span)) + .validate(|(_, token), span, emit| { + UnresolvedTypeData::from_int_token(token) + .map(|data| data.with_span(span)) + .unwrap_or_else(|err| { + emit(ParserError::with_reason(ParserErrorReason::InvalidBitSize(err.0), span)); + UnresolvedType::error(span) + }) + }) } fn named_type(type_parser: impl NoirParser) -> impl NoirParser { - path() - .then(generic_type_args(type_parser)) - .map_with_span(|(path, args), span| UnresolvedTypeData::Named(path, args).with_span(span)) + path().then(generic_type_args(type_parser)).map_with_span(|(path, args), span| { + UnresolvedTypeData::Named(path, args, false).with_span(span) + }) } fn named_trait(type_parser: impl NoirParser) -> impl NoirParser { @@ -1322,13 +871,6 @@ fn create_infix_expression(lhs: Expression, (operator, rhs): (BinaryOp, Expressi Expression { span, kind: ExpressionKind::Infix(infix) } } -// Right-shift (>>) is issued as two separate > tokens by the lexer as this makes it easier -// to parse nested generic types. For normal expressions however, it means we have to manually -// parse two greater-than tokens as a single right-shift here. -fn right_shift_operator() -> impl NoirParser { - just(Token::Greater).then(just(Token::Greater)).to(Token::ShiftRight) -} - fn operator_with_precedence(precedence: Precedence) -> impl NoirParser> { right_shift_operator() .or(any()) // Parse any single token, we're validating it as an operator next @@ -1468,18 +1010,6 @@ where }) } -fn lambda<'a>( - expr_parser: impl NoirParser + 'a, -) -> impl NoirParser + 'a { - lambda_parameters() - .delimited_by(just(Token::Pipe), just(Token::Pipe)) - .then(lambda_return_type()) - .then(expr_parser) - .map(|((parameters, return_type), body)| { - ExpressionKind::Lambda(Box::new(Lambda { parameters, return_type, body })) - }) -} - fn for_loop<'a, P, S>(expr_no_constructors: P, statement: S) -> impl NoirParser + 'a where P: ExprParser + 'a, @@ -1544,41 +1074,6 @@ where expr_parser.separated_by(just(Token::Comma)).allow_trailing() } -fn not
<P>(term_parser: P) -> impl NoirParser<ExpressionKind>
-where
-    P: ExprParser,
-{
-    just(Token::Bang).ignore_then(term_parser).map(|rhs| ExpressionKind::prefix(UnaryOp::Not, rhs))
-}
-
-fn negation
<P>(term_parser: P) -> impl NoirParser<ExpressionKind>
-where
-    P: ExprParser,
-{
-    just(Token::Minus)
-        .ignore_then(term_parser)
-        .map(|rhs| ExpressionKind::prefix(UnaryOp::Minus, rhs))
-}
-
-fn mutable_reference
<P>(term_parser: P) -> impl NoirParser<ExpressionKind>
-where
-    P: ExprParser,
-{
-    just(Token::Ampersand)
-        .ignore_then(keyword(Keyword::Mut))
-        .ignore_then(term_parser)
-        .map(|rhs| ExpressionKind::prefix(UnaryOp::MutableReference, rhs))
-}
-
-fn dereference
<P>
(term_parser: P) -> impl NoirParser -where - P: ExprParser, -{ - just(Token::Star) - .ignore_then(term_parser) - .map(|rhs| ExpressionKind::prefix(UnaryOp::Dereference { implicitly_added: false }, rhs)) -} - /// Atoms are parameterized on whether constructor expressions are allowed or not. /// Certain constructs like `if` and `for` disallow constructor expressions when a /// block may be expected. @@ -1601,7 +1096,7 @@ where } else { nothing().boxed() }, - lambda(expr_parser.clone()), + lambdas::lambda(expr_parser.clone()), block(statement).map(ExpressionKind::Block), variable(), literal(), @@ -1669,185 +1164,12 @@ where long_form.or(short_form) } -fn variable() -> impl NoirParser { - path().map(ExpressionKind::Variable) -} - -fn literal() -> impl NoirParser { - token_kind(TokenKind::Literal).map(|token| match token { - Token::Int(x) => ExpressionKind::integer(x), - Token::Bool(b) => ExpressionKind::boolean(b), - Token::Str(s) => ExpressionKind::string(s), - Token::RawStr(s, hashes) => ExpressionKind::raw_string(s, hashes), - Token::FmtStr(s) => ExpressionKind::format_string(s), - unexpected => unreachable!("Non-literal {} parsed as a literal", unexpected), - }) -} - -fn literal_with_sign() -> impl NoirParser { - choice(( - literal(), - just(Token::Minus).then(literal()).map(|(_, exp)| match exp { - ExpressionKind::Literal(Literal::Integer(value, sign)) => { - ExpressionKind::Literal(Literal::Integer(value, !sign)) - } - _ => unreachable!(), - }), - )) -} - -fn literal_or_collection<'a>( - expr_parser: impl ExprParser + 'a, -) -> impl NoirParser + 'a { - choice((literal_with_sign(), constructor(expr_parser.clone()), array_expr(expr_parser))) -} - #[cfg(test)] mod test { - use noirc_errors::CustomDiagnostic; - + use super::test_helpers::*; use super::*; use crate::{ArrayLiteral, Literal}; - fn parse_with(parser: P, program: &str) -> Result> - where - P: NoirParser, - { - let (tokens, lexer_errors) = Lexer::lex(program); - if !lexer_errors.is_empty() { - return Err(vecmap(lexer_errors, Into::into)); - } - parser - .then_ignore(just(Token::EOF)) - .parse(tokens) - .map_err(|errors| vecmap(errors, Into::into)) - } - - fn parse_recover(parser: P, program: &str) -> (Option, Vec) - where - P: NoirParser, - { - let (tokens, lexer_errors) = Lexer::lex(program); - let (opt, errs) = parser.then_ignore(force(just(Token::EOF))).parse_recovery(tokens); - - let mut errors = vecmap(lexer_errors, Into::into); - errors.extend(errs.into_iter().map(Into::into)); - - (opt, errors) - } - - fn parse_all(parser: P, programs: Vec<&str>) -> Vec - where - P: NoirParser, - { - vecmap(programs, move |program| { - let message = format!("Failed to parse:\n{program}"); - let (op_t, diagnostics) = parse_recover(&parser, program); - diagnostics.iter().for_each(|diagnostic| { - if diagnostic.is_error() { - panic!("{} with error {}", &message, diagnostic); - } - }); - op_t.expect(&message) - }) - } - - fn parse_all_failing(parser: P, programs: Vec<&str>) -> Vec - where - P: NoirParser, - T: std::fmt::Display, - { - programs - .into_iter() - .flat_map(|program| match parse_with(&parser, program) { - Ok(expr) => { - unreachable!( - "Expected this input to fail:\n{}\nYet it successfully parsed as:\n{}", - program, expr - ) - } - Err(diagnostics) => { - if diagnostics.iter().all(|diagnostic: &CustomDiagnostic| diagnostic.is_warning()) { - unreachable!( - "Expected at least one error when parsing:\n{}\nYet it successfully parsed without errors:\n", - program - ) - }; - diagnostics - } - }) - .collect() - } - - #[derive(Copy, 
Clone)] - struct Case { - source: &'static str, - errors: usize, - expect: &'static str, - } - - fn check_cases_with_errors(cases: &[Case], parser: P) - where - P: NoirParser + Clone, - T: std::fmt::Display, - { - let show_errors = |v| vecmap(&v, ToString::to_string).join("\n"); - - let results = vecmap(cases, |&case| { - let (opt, errors) = parse_recover(parser.clone(), case.source); - let actual = opt.map(|ast| ast.to_string()); - let actual = if let Some(s) = &actual { s.to_string() } else { "(none)".to_string() }; - - let result = ((errors.len(), actual.clone()), (case.errors, case.expect.to_string())); - if result.0 != result.1 { - let num_errors = errors.len(); - let shown_errors = show_errors(errors); - eprintln!( - concat!( - "\nExpected {expected_errors} error(s) and got {num_errors}:", - "\n\n{shown_errors}", - "\n\nFrom input: {src}", - "\nExpected AST: {expected_result}", - "\nActual AST: {actual}\n", - ), - expected_errors = case.errors, - num_errors = num_errors, - shown_errors = shown_errors, - src = case.source, - expected_result = case.expect, - actual = actual, - ); - } - result - }); - - assert_eq!(vecmap(&results, |t| t.0.clone()), vecmap(&results, |t| t.1.clone()),); - } - - #[test] - fn regression_skip_comment() { - parse_all( - function_definition(false), - vec![ - "fn main( - // This comment should be skipped - x : Field, - // And this one - y : Field, - ) { - }", - "fn main(x : Field, y : Field,) { - foo::bar( - // Comment for x argument - x, - // Comment for y argument - y - ) - }", - ], - ); - } - #[test] fn parse_infix() { let valid = vec!["x + 6", "x - k", "x + (x + a)", " x * (x + a) + (x - 4)"]; @@ -1998,130 +1320,6 @@ mod test { } } - /// Deprecated constrain usage test - #[test] - fn parse_constrain() { - let errors = parse_with(constrain(expression()), "constrain x == y").unwrap_err(); - assert_eq!(errors.len(), 1); - assert!(format!("{}", errors.first().unwrap()).contains("deprecated")); - - // Currently we disallow constrain statements where the outer infix operator - // produces a value. This would require an implicit `==` which - // may not be intuitive to the user. - // - // If this is deemed useful, one would either apply a transformation - // or interpret it with an `==` in the evaluator - let disallowed_operators = vec![ - BinaryOpKind::And, - BinaryOpKind::Subtract, - BinaryOpKind::Divide, - BinaryOpKind::Multiply, - BinaryOpKind::Or, - ]; - - for operator in disallowed_operators { - let src = format!("constrain x {} y;", operator.as_string()); - let errors = parse_with(constrain(expression()), &src).unwrap_err(); - assert_eq!(errors.len(), 2); - assert!(format!("{}", errors.first().unwrap()).contains("deprecated")); - } - - // These are general cases which should always work. - // - // The first case is the most noteworthy. 
It contains two `==` - // The first (inner) `==` is a predicate which returns 0/1 - // The outer layer is an infix `==` which is - // associated with the Constrain statement - let errors = parse_all_failing( - constrain(expression()), - vec![ - "constrain ((x + y) == k) + z == y", - "constrain (x + !y) == y", - "constrain (x ^ y) == y", - "constrain (x ^ y) == (y + m)", - "constrain x + x ^ x == y | m", - ], - ); - assert_eq!(errors.len(), 5); - assert!(errors - .iter() - .all(|err| { err.is_error() && err.to_string().contains("deprecated") })); - } - - /// This is the standard way to declare an assert statement - #[test] - fn parse_assert() { - parse_with(assertion(expression()), "assert(x == y)").unwrap(); - - // Currently we disallow constrain statements where the outer infix operator - // produces a value. This would require an implicit `==` which - // may not be intuitive to the user. - // - // If this is deemed useful, one would either apply a transformation - // or interpret it with an `==` in the evaluator - let disallowed_operators = vec![ - BinaryOpKind::And, - BinaryOpKind::Subtract, - BinaryOpKind::Divide, - BinaryOpKind::Multiply, - BinaryOpKind::Or, - ]; - - for operator in disallowed_operators { - let src = format!("assert(x {} y);", operator.as_string()); - parse_with(assertion(expression()), &src).unwrap_err(); - } - - // These are general cases which should always work. - // - // The first case is the most noteworthy. It contains two `==` - // The first (inner) `==` is a predicate which returns 0/1 - // The outer layer is an infix `==` which is - // associated with the Constrain statement - parse_all( - assertion(expression()), - vec![ - "assert(((x + y) == k) + z == y)", - "assert((x + !y) == y)", - "assert((x ^ y) == y)", - "assert((x ^ y) == (y + m))", - "assert(x + x ^ x == y | m)", - ], - ); - - match parse_with(assertion(expression()), "assert(x == y, \"assertion message\")").unwrap() - { - StatementKind::Constrain(ConstrainStatement(_, message, _)) => { - assert_eq!(message, Some("assertion message".to_owned())); - } - _ => unreachable!(), - } - } - - /// This is the standard way to assert that two expressions are equivalent - #[test] - fn parse_assert_eq() { - parse_all( - assertion_eq(expression()), - vec![ - "assert_eq(x, y)", - "assert_eq(((x + y) == k) + z, y)", - "assert_eq(x + !y, y)", - "assert_eq(x ^ y, y)", - "assert_eq(x ^ y, y + m)", - "assert_eq(x + x ^ x, y | m)", - ], - ); - match parse_with(assertion_eq(expression()), "assert_eq(x, y, \"assertion message\")") - .unwrap() - { - StatementKind::Constrain(ConstrainStatement(_, message, _)) => { - assert_eq!(message, Some("assertion message".to_owned())); - } - _ => unreachable!(), - } - } - #[test] fn parse_let() { // Why is it valid to specify a let declaration as having type u8? 
@@ -2155,84 +1353,6 @@ mod test { ); } - #[test] - fn parse_function() { - parse_all( - function_definition(false), - vec![ - "fn func_name() {}", - "fn f(foo: pub u8, y : pub Field) -> u8 { x + a }", - "fn f(f: pub Field, y : Field, z : Field) -> u8 { x + a }", - "fn func_name(f: Field, y : pub Field, z : pub [u8;5],) {}", - "fn f(f: pub Field, y : Field, z : Field) -> u8 { x + a }", - "fn f(f: pub Field, y : T, z : Field) -> u8 { x + a }", - "fn func_name(x: [Field], y : [Field;2],y : pub [Field;2], z : pub [u8;5]) {}", - "fn main(x: pub u8, y: pub u8) -> distinct pub [u8; 2] { [x, y] }", - "fn f(f: pub Field, y : Field, z : comptime Field) -> u8 { x + a }", - "fn f(f: pub Field, y : T, z : comptime Field) -> u8 { x + a }", - "fn func_name(f: Field, y : T) where T: SomeTrait {}", - "fn func_name(f: Field, y : T) where T: SomeTrait + SomeTrait2 {}", - "fn func_name(f: Field, y : T) where T: SomeTrait, T: SomeTrait2 {}", - "fn func_name(f: Field, y : T) where T: SomeTrait + SomeTrait2 {}", - "fn func_name(f: Field, y : T) where T: SomeTrait + SomeTrait2 {}", - "fn func_name(f: Field, y : T) where T: SomeTrait + SomeTrait2 {}", - "fn func_name(f: Field, y : T) where T: SomeTrait + SomeTrait2 {}", - "fn func_name(f: Field, y : T) where T: SomeTrait + SomeTrait2 + TraitY {}", - "fn func_name(f: Field, y : T, z : U) where SomeStruct: SomeTrait {}", - // 'where u32: SomeTrait' is allowed in Rust. - // It will result in compiler error in case SomeTrait isn't implemented for u32. - "fn func_name(f: Field, y : T) where u32: SomeTrait {}", - // A trailing plus is allowed by Rust, so we support it as well. - "fn func_name(f: Field, y : T) where T: SomeTrait + {}", - // The following should produce compile error on later stage. From the parser's perspective it's fine - "fn func_name(f: Field, y : Field, z : Field) where T: SomeTrait {}", - ], - ); - - parse_all_failing( - function_definition(false), - vec![ - "fn x2( f: []Field,,) {}", - "fn ( f: []Field) {}", - "fn ( f: []Field) {}", - // TODO: Check for more specific error messages - "fn func_name(f: Field, y : pub Field, z : pub [u8;5],) where T: {}", - "fn func_name(f: Field, y : pub Field, z : pub [u8;5],) where SomeTrait {}", - "fn func_name(f: Field, y : pub Field, z : pub [u8;5],) SomeTrait {}", - // A leading plus is not allowed. - "fn func_name(f: Field, y : T) where T: + SomeTrait {}", - "fn func_name(f: Field, y : T) where T: TraitX + {}", - ], - ); - } - - #[test] - fn parse_trait() { - parse_all( - trait_definition(), - vec![ - // Empty traits are legal in Rust and sometimes used as a way to whitelist certain types - // for a particular operation. 
Also known as `tag` or `marker` traits: - // https://stackoverflow.com/questions/71895489/what-is-the-purpose-of-defining-empty-impl-in-rust - "trait Empty {}", - "trait TraitWithDefaultBody { fn foo(self) {} }", - "trait TraitAcceptingMutableRef { fn foo(&mut self); }", - "trait TraitWithTypeBoundOperation { fn identity() -> Self; }", - "trait TraitWithAssociatedType { type Element; fn item(self, index: Field) -> Self::Element; }", - "trait TraitWithAssociatedConstant { let Size: Field; }", - "trait TraitWithAssociatedConstantWithDefaultValue { let Size: Field = 10; }", - "trait GenericTrait { fn elem(&mut self, index: Field) -> T; }", - "trait GenericTraitWithConstraints where T: SomeTrait { fn elem(self, index: Field) -> T; }", - "trait TraitWithMultipleGenericParams where A: SomeTrait, B: AnotherTrait { let Size: Field; fn zero() -> Self; }", - ], - ); - - parse_all_failing( - trait_definition(), - vec!["trait MissingBody", "trait WrongDelimiter { fn foo() -> u8, fn bar() -> u8 }"], - ); - } - #[test] fn parse_parenthesized_expression() { parse_all( @@ -2263,104 +1383,12 @@ mod test { ); } - fn expr_to_lit(expr: ExpressionKind) -> Literal { - match expr { - ExpressionKind::Literal(literal) => literal, - _ => unreachable!("expected a literal"), - } - } - - #[test] - fn parse_int() { - let int = parse_with(literal(), "5").unwrap(); - let hex = parse_with(literal(), "0x05").unwrap(); - - match (expr_to_lit(int), expr_to_lit(hex)) { - (Literal::Integer(int, false), Literal::Integer(hex, false)) => assert_eq!(int, hex), - _ => unreachable!(), - } - } - - #[test] - fn parse_string() { - let expr = parse_with(literal(), r#""hello""#).unwrap(); - match expr_to_lit(expr) { - Literal::Str(s) => assert_eq!(s, "hello"), - _ => unreachable!(), - }; - } - - #[test] - fn parse_bool() { - let expr_true = parse_with(literal(), "true").unwrap(); - let expr_false = parse_with(literal(), "false").unwrap(); - - match (expr_to_lit(expr_true), expr_to_lit(expr_false)) { - (Literal::Bool(t), Literal::Bool(f)) => { - assert!(t); - assert!(!f); - } - _ => unreachable!(), - }; - } - #[test] fn parse_module_declaration() { parse_with(module_declaration(), "mod foo").unwrap(); parse_with(module_declaration(), "mod 1").unwrap_err(); } - #[test] - fn parse_path() { - let cases = vec![ - ("std", vec!["std"]), - ("std::hash", vec!["std", "hash"]), - ("std::hash::collections", vec!["std", "hash", "collections"]), - ("dep::foo::bar", vec!["foo", "bar"]), - ("crate::std::hash", vec!["std", "hash"]), - ]; - - for (src, expected_segments) in cases { - let path: Path = parse_with(path(), src).unwrap(); - for (segment, expected) in path.segments.into_iter().zip(expected_segments) { - assert_eq!(segment.0.contents, expected); - } - } - - parse_all_failing(path(), vec!["std::", "::std", "std::hash::", "foo::1"]); - } - - #[test] - fn parse_path_kinds() { - let cases = vec![ - ("std", PathKind::Plain), - ("dep::hash::collections", PathKind::Dep), - ("crate::std::hash", PathKind::Crate), - ]; - - for (src, expected_path_kind) in cases { - let path = parse_with(path(), src).unwrap(); - assert_eq!(path.kind, expected_path_kind); - } - - parse_all_failing( - path(), - vec!["dep", "crate", "crate::std::crate", "foo::bar::crate", "foo::dep"], - ); - } - - #[test] - fn parse_unary() { - parse_all( - term(expression(), expression_no_constructors(expression()), fresh_statement(), true), - vec!["!hello", "-hello", "--hello", "-!hello", "!-hello"], - ); - parse_all_failing( - term(expression(), expression_no_constructors(expression()), 
fresh_statement(), true), - vec!["+hello", "/hello"], - ); - } - #[test] fn parse_use() { parse_all( @@ -2392,26 +1420,6 @@ mod test { ); } - #[test] - fn parse_structs() { - let cases = vec![ - "struct Foo;", - "struct Foo { }", - "struct Bar { ident: Field, }", - "struct Baz { ident: Field, other: Field }", - "#[attribute] struct Baz { ident: Field, other: Field }", - ]; - parse_all(struct_definition(), cases); - - let failing = vec![ - "struct { }", - "struct Foo { bar: pub Field }", - "struct Foo { bar: pub Field }", - "#[oracle(some)] struct Foo { bar: Field }", - ]; - parse_all_failing(struct_definition(), failing); - } - #[test] fn parse_type_aliases() { let cases = vec!["type foo = u8", "type bar = String", "type baz = Vec"]; @@ -2483,7 +1491,7 @@ mod test { Case { source: "assert(x == x, x)", expect: "constrain (plain::x == plain::x)", - errors: 1, + errors: 0, }, Case { source: "assert_eq(x,)", expect: "constrain (Error == Error)", errors: 1 }, Case { @@ -2494,7 +1502,7 @@ mod test { Case { source: "assert_eq(x, x, x)", expect: "constrain (plain::x == plain::x)", - errors: 1, + errors: 0, }, ]; @@ -2503,7 +1511,7 @@ mod test { #[test] fn return_validation() { - let cases = vec![ + let cases = [ Case { source: "{ return 42; }", expect: concat!("{\n", " Error\n", "}",), @@ -2532,7 +1540,7 @@ mod test { #[test] fn expr_no_constructors() { - let cases = vec![ + let cases = [ Case { source: "{ if structure { a: 1 } {} }", expect: concat!( @@ -2583,79 +1591,4 @@ mod test { check_cases_with_errors(&cases[..], block(fresh_statement())); } - - #[test] - fn parse_raw_string_expr() { - let cases = vec![ - Case { source: r##" r"foo" "##, expect: r##"r"foo""##, errors: 0 }, - Case { source: r##" r#"foo"# "##, expect: r##"r#"foo"#"##, errors: 0 }, - // backslash - Case { source: r##" r"\\" "##, expect: r##"r"\\""##, errors: 0 }, - Case { source: r##" r#"\"# "##, expect: r##"r#"\"#"##, errors: 0 }, - Case { source: r##" r#"\\"# "##, expect: r##"r#"\\"#"##, errors: 0 }, - Case { source: r##" r#"\\\"# "##, expect: r##"r#"\\\"#"##, errors: 0 }, - // escape sequence - Case { - source: r##" r#"\t\n\\t\\n\\\t\\\n\\\\"# "##, - expect: r##"r#"\t\n\\t\\n\\\t\\\n\\\\"#"##, - errors: 0, - }, - Case { source: r##" r#"\\\\\\\\"# "##, expect: r##"r#"\\\\\\\\"#"##, errors: 0 }, - // mismatch - errors: - Case { source: r###" r#"foo"## "###, expect: r###"r#"foo"#"###, errors: 1 }, - Case { source: r###" r##"foo"# "###, expect: "(none)", errors: 2 }, - // mismatch: short: - Case { source: r###" r"foo"# "###, expect: r###"r"foo""###, errors: 1 }, - Case { source: r###" r#"foo" "###, expect: "(none)", errors: 2 }, - // empty string - Case { source: r####"r"""####, expect: r####"r"""####, errors: 0 }, - Case { source: r####"r###""###"####, expect: r####"r###""###"####, errors: 0 }, - // miscellaneous - Case { source: r###" r#\"foo\"# "###, expect: "plain::r", errors: 2 }, - Case { source: r###" r\"foo\" "###, expect: "plain::r", errors: 1 }, - Case { source: r###" r##"foo"# "###, expect: "(none)", errors: 2 }, - // missing 'r' letter - Case { source: r###" ##"foo"# "###, expect: r#""foo""#, errors: 2 }, - Case { source: r###" #"foo" "###, expect: "plain::foo", errors: 2 }, - // whitespace - Case { source: r###" r #"foo"# "###, expect: "plain::r", errors: 2 }, - Case { source: r###" r# "foo"# "###, expect: "plain::r", errors: 3 }, - Case { source: r###" r#"foo" # "###, expect: "(none)", errors: 2 }, - // after identifier - Case { source: r###" bar#"foo"# "###, expect: "plain::bar", errors: 2 }, - // nested - Case { 
- source: r###"r##"foo r#"bar"# r"baz" ### bye"##"###, - expect: r###"r##"foo r#"bar"# r"baz" ### bye"##"###, - errors: 0, - }, - ]; - - check_cases_with_errors(&cases[..], expression()); - } - - #[test] - fn parse_raw_string_lit() { - let lit_cases = vec![ - Case { source: r##" r"foo" "##, expect: r##"r"foo""##, errors: 0 }, - Case { source: r##" r#"foo"# "##, expect: r##"r#"foo"#"##, errors: 0 }, - // backslash - Case { source: r##" r"\\" "##, expect: r##"r"\\""##, errors: 0 }, - Case { source: r##" r#"\"# "##, expect: r##"r#"\"#"##, errors: 0 }, - Case { source: r##" r#"\\"# "##, expect: r##"r#"\\"#"##, errors: 0 }, - Case { source: r##" r#"\\\"# "##, expect: r##"r#"\\\"#"##, errors: 0 }, - // escape sequence - Case { - source: r##" r#"\t\n\\t\\n\\\t\\\n\\\\"# "##, - expect: r##"r#"\t\n\\t\\n\\\t\\\n\\\\"#"##, - errors: 0, - }, - Case { source: r##" r#"\\\\\\\\"# "##, expect: r##"r#"\\\\\\\\"#"##, errors: 0 }, - // mismatch - errors: - Case { source: r###" r#"foo"## "###, expect: r###"r#"foo"#"###, errors: 1 }, - Case { source: r###" r##"foo"# "###, expect: "(none)", errors: 2 }, - ]; - - check_cases_with_errors(&lit_cases[..], literal()); - } } diff --git a/compiler/noirc_frontend/src/parser/parser/assertion.rs b/compiler/noirc_frontend/src/parser/parser/assertion.rs new file mode 100644 index 00000000000..f9c8d7aa46b --- /dev/null +++ b/compiler/noirc_frontend/src/parser/parser/assertion.rs @@ -0,0 +1,219 @@ +use crate::ast::{Expression, ExpressionKind, StatementKind}; +use crate::parser::{ + ignore_then_commit, labels::ParsingRuleLabel, parenthesized, ExprParser, NoirParser, + ParserError, ParserErrorReason, +}; + +use crate::token::{Keyword, Token}; +use crate::{BinaryOpKind, ConstrainKind, ConstrainStatement, InfixExpression, Recoverable}; + +use chumsky::prelude::*; +use noirc_errors::Spanned; + +use super::keyword; + +pub(super) fn constrain<'a, P>(expr_parser: P) -> impl NoirParser + 'a +where + P: ExprParser + 'a, +{ + ignore_then_commit( + keyword(Keyword::Constrain).labelled(ParsingRuleLabel::Statement), + expr_parser, + ) + .map(|expr| StatementKind::Constrain(ConstrainStatement(expr, None, ConstrainKind::Constrain))) + .validate(|expr, span, emit| { + emit(ParserError::with_reason(ParserErrorReason::ConstrainDeprecated, span)); + expr + }) +} + +pub(super) fn assertion<'a, P>(expr_parser: P) -> impl NoirParser + 'a +where + P: ExprParser + 'a, +{ + let argument_parser = + expr_parser.separated_by(just(Token::Comma)).allow_trailing().at_least(1).at_most(2); + + ignore_then_commit(keyword(Keyword::Assert), parenthesized(argument_parser)) + .labelled(ParsingRuleLabel::Statement) + .validate(|expressions, span, _| { + let condition = expressions.first().unwrap_or(&Expression::error(span)).clone(); + let message = expressions.get(1).cloned(); + StatementKind::Constrain(ConstrainStatement(condition, message, ConstrainKind::Assert)) + }) +} + +pub(super) fn assertion_eq<'a, P>(expr_parser: P) -> impl NoirParser + 'a +where + P: ExprParser + 'a, +{ + let argument_parser = + expr_parser.separated_by(just(Token::Comma)).allow_trailing().at_least(2).at_most(3); + + ignore_then_commit(keyword(Keyword::AssertEq), parenthesized(argument_parser)) + .labelled(ParsingRuleLabel::Statement) + .validate(|exprs: Vec, span, _| { + let predicate = Expression::new( + ExpressionKind::Infix(Box::new(InfixExpression { + lhs: exprs.first().unwrap_or(&Expression::error(span)).clone(), + rhs: exprs.get(1).unwrap_or(&Expression::error(span)).clone(), + operator: Spanned::from(span, BinaryOpKind::Equal), 
+ })), + span, + ); + let message = exprs.get(2).cloned(); + StatementKind::Constrain(ConstrainStatement( + predicate, + message, + ConstrainKind::AssertEq, + )) + }) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + parser::parser::{ + expression, + test_helpers::{parse_all, parse_all_failing, parse_with}, + }, + Literal, + }; + + /// Deprecated constrain usage test + #[test] + fn parse_constrain() { + let errors = parse_with(constrain(expression()), "constrain x == y").unwrap_err(); + assert_eq!(errors.len(), 1); + assert!(format!("{}", errors.first().unwrap()).contains("deprecated")); + + // Currently we disallow constrain statements where the outer infix operator + // produces a value. This would require an implicit `==` which + // may not be intuitive to the user. + // + // If this is deemed useful, one would either apply a transformation + // or interpret it with an `==` in the evaluator + let disallowed_operators = vec![ + BinaryOpKind::And, + BinaryOpKind::Subtract, + BinaryOpKind::Divide, + BinaryOpKind::Multiply, + BinaryOpKind::Or, + ]; + + for operator in disallowed_operators { + let src = format!("constrain x {} y;", operator.as_string()); + let errors = parse_with(constrain(expression()), &src).unwrap_err(); + assert_eq!(errors.len(), 2); + assert!(format!("{}", errors.first().unwrap()).contains("deprecated")); + } + + // These are general cases which should always work. + // + // The first case is the most noteworthy. It contains two `==` + // The first (inner) `==` is a predicate which returns 0/1 + // The outer layer is an infix `==` which is + // associated with the Constrain statement + let errors = parse_all_failing( + constrain(expression()), + vec![ + "constrain ((x + y) == k) + z == y", + "constrain (x + !y) == y", + "constrain (x ^ y) == y", + "constrain (x ^ y) == (y + m)", + "constrain x + x ^ x == y | m", + ], + ); + assert_eq!(errors.len(), 5); + assert!(errors + .iter() + .all(|err| { err.is_error() && err.to_string().contains("deprecated") })); + } + + /// This is the standard way to declare an assert statement + #[test] + fn parse_assert() { + parse_with(assertion(expression()), "assert(x == y)").unwrap(); + + // Currently we disallow constrain statements where the outer infix operator + // produces a value. This would require an implicit `==` which + // may not be intuitive to the user. + // + // If this is deemed useful, one would either apply a transformation + // or interpret it with an `==` in the evaluator + let disallowed_operators = vec![ + BinaryOpKind::And, + BinaryOpKind::Subtract, + BinaryOpKind::Divide, + BinaryOpKind::Multiply, + BinaryOpKind::Or, + ]; + + for operator in disallowed_operators { + let src = format!("assert(x {} y);", operator.as_string()); + parse_with(assertion(expression()), &src).unwrap_err(); + } + + // These are general cases which should always work. + // + // The first case is the most noteworthy. 
It contains two `==` + // The first (inner) `==` is a predicate which returns 0/1 + // The outer layer is an infix `==` which is + // associated with the Constrain statement + parse_all( + assertion(expression()), + vec![ + "assert(((x + y) == k) + z == y)", + "assert((x + !y) == y)", + "assert((x ^ y) == y)", + "assert((x ^ y) == (y + m))", + "assert(x + x ^ x == y | m)", + ], + ); + + match parse_with(assertion(expression()), "assert(x == y, \"assertion message\")").unwrap() + { + StatementKind::Constrain(ConstrainStatement(_, message, _)) => { + let message = message.unwrap(); + match message.kind { + ExpressionKind::Literal(Literal::Str(message_string)) => { + assert_eq!(message_string, "assertion message".to_owned()); + } + _ => unreachable!(), + } + } + _ => unreachable!(), + } + } + + /// This is the standard way to assert that two expressions are equivalent + #[test] + fn parse_assert_eq() { + parse_all( + assertion_eq(expression()), + vec![ + "assert_eq(x, y)", + "assert_eq(((x + y) == k) + z, y)", + "assert_eq(x + !y, y)", + "assert_eq(x ^ y, y)", + "assert_eq(x ^ y, y + m)", + "assert_eq(x + x ^ x, y | m)", + ], + ); + match parse_with(assertion_eq(expression()), "assert_eq(x, y, \"assertion message\")") + .unwrap() + { + StatementKind::Constrain(ConstrainStatement(_, message, _)) => { + let message = message.unwrap(); + match message.kind { + ExpressionKind::Literal(Literal::Str(message_string)) => { + assert_eq!(message_string, "assertion message".to_owned()); + } + _ => unreachable!(), + } + } + _ => unreachable!(), + } + } +} diff --git a/compiler/noirc_frontend/src/parser/parser/attributes.rs b/compiler/noirc_frontend/src/parser/parser/attributes.rs new file mode 100644 index 00000000000..4b256a95c8b --- /dev/null +++ b/compiler/noirc_frontend/src/parser/parser/attributes.rs @@ -0,0 +1,46 @@ +use chumsky::Parser; +use noirc_errors::Span; + +use crate::{ + parser::{NoirParser, ParserError, ParserErrorReason}, + token::{Attribute, Attributes, Token, TokenKind}, +}; + +use super::primitives::token_kind; + +fn attribute() -> impl NoirParser { + token_kind(TokenKind::Attribute).map(|token| match token { + Token::Attribute(attribute) => attribute, + _ => unreachable!("Parser should have already errored due to token not being an attribute"), + }) +} + +pub(super) fn attributes() -> impl NoirParser> { + attribute().repeated() +} + +pub(super) fn validate_attributes( + attributes: Vec, + span: Span, + emit: &mut dyn FnMut(ParserError), +) -> Attributes { + let mut primary = None; + let mut secondary = Vec::new(); + + for attribute in attributes { + match attribute { + Attribute::Function(attr) => { + if primary.is_some() { + emit(ParserError::with_reason( + ParserErrorReason::MultipleFunctionAttributesFound, + span, + )); + } + primary = Some(attr); + } + Attribute::Secondary(attr) => secondary.push(attr), + } + } + + Attributes { function: primary, secondary } +} diff --git a/compiler/noirc_frontend/src/parser/parser/function.rs b/compiler/noirc_frontend/src/parser/parser/function.rs new file mode 100644 index 00000000000..42ee484bfc9 --- /dev/null +++ b/compiler/noirc_frontend/src/parser/parser/function.rs @@ -0,0 +1,217 @@ +use super::{ + attributes::{attributes, validate_attributes}, + block, fresh_statement, ident, keyword, nothing, optional_distinctness, optional_visibility, + parameter_name_recovery, parameter_recovery, parenthesized, parse_type, pattern, + self_parameter, where_clause, NoirParser, +}; +use crate::parser::labels::ParsingRuleLabel; +use crate::parser::spanned; 
+use crate::token::{Keyword, Token}; +use crate::{ + Distinctness, FunctionDefinition, FunctionReturnType, FunctionVisibility, Ident, NoirFunction, + Param, Visibility, +}; + +use chumsky::prelude::*; + +/// function_definition: attribute function_modifiers 'fn' ident generics '(' function_parameters ')' function_return_type block +/// function_modifiers 'fn' ident generics '(' function_parameters ')' function_return_type block +pub(super) fn function_definition(allow_self: bool) -> impl NoirParser { + attributes() + .then(function_modifiers()) + .then_ignore(keyword(Keyword::Fn)) + .then(ident()) + .then(generics()) + .then(parenthesized(function_parameters(allow_self))) + .then(function_return_type()) + .then(where_clause()) + .then(spanned(block(fresh_statement()))) + .validate(|(((args, ret), where_clause), (body, body_span)), span, emit| { + let ((((attributes, modifiers), name), generics), parameters) = args; + + // Validate collected attributes, filtering them into function and secondary variants + let attributes = validate_attributes(attributes, span, emit); + FunctionDefinition { + span: body_span, + name, + attributes, + is_unconstrained: modifiers.0, + is_open: modifiers.2, + is_internal: modifiers.3, + visibility: modifiers.1, + generics, + parameters, + body, + where_clause, + return_type: ret.1, + return_visibility: ret.0 .1, + return_distinctness: ret.0 .0, + } + .into() + }) +} + +/// visibility_modifier: 'pub(crate)'? 'pub'? '' +fn visibility_modifier() -> impl NoirParser { + let is_pub_crate = (keyword(Keyword::Pub) + .then_ignore(just(Token::LeftParen)) + .then_ignore(keyword(Keyword::Crate)) + .then_ignore(just(Token::RightParen))) + .map(|_| FunctionVisibility::PublicCrate); + + let is_pub = keyword(Keyword::Pub).map(|_| FunctionVisibility::Public); + + let is_private = empty().map(|_| FunctionVisibility::Private); + + choice((is_pub_crate, is_pub, is_private)) +} + +/// function_modifiers: 'unconstrained'? (visibility)? 'open'? 'internal'? 
+/// +/// returns (is_unconstrained, visibility, is_open, is_internal) for whether each keyword was present +fn function_modifiers() -> impl NoirParser<(bool, FunctionVisibility, bool, bool)> { + keyword(Keyword::Unconstrained) + .or_not() + .then(visibility_modifier()) + .then(keyword(Keyword::Open).or_not()) + .then(keyword(Keyword::Internal).or_not()) + .map(|(((unconstrained, visibility), open), internal)| { + (unconstrained.is_some(), visibility, open.is_some(), internal.is_some()) + }) +} + +/// non_empty_ident_list: ident ',' non_empty_ident_list +/// | ident +/// +/// generics: '<' non_empty_ident_list '>' +/// | %empty +pub(super) fn generics() -> impl NoirParser> { + ident() + .separated_by(just(Token::Comma)) + .allow_trailing() + .at_least(1) + .delimited_by(just(Token::Less), just(Token::Greater)) + .or_not() + .map(|opt| opt.unwrap_or_default()) +} + +fn function_return_type() -> impl NoirParser<((Distinctness, Visibility), FunctionReturnType)> { + just(Token::Arrow) + .ignore_then(optional_distinctness()) + .then(optional_visibility()) + .then(spanned(parse_type())) + .or_not() + .map_with_span(|ret, span| match ret { + Some((head, (ty, _))) => (head, FunctionReturnType::Ty(ty)), + None => ( + (Distinctness::DuplicationAllowed, Visibility::Private), + FunctionReturnType::Default(span), + ), + }) +} + +fn function_parameters<'a>(allow_self: bool) -> impl NoirParser> + 'a { + let typ = parse_type().recover_via(parameter_recovery()); + + let full_parameter = pattern() + .recover_via(parameter_name_recovery()) + .then_ignore(just(Token::Colon)) + .then(optional_visibility()) + .then(typ) + .map_with_span(|((pattern, visibility), typ), span| Param { + visibility, + pattern, + typ, + span, + }); + + let self_parameter = if allow_self { self_parameter().boxed() } else { nothing().boxed() }; + + let parameter = full_parameter.or(self_parameter); + + parameter + .separated_by(just(Token::Comma)) + .allow_trailing() + .labelled(ParsingRuleLabel::Parameter) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::parser::parser::test_helpers::*; + + #[test] + fn regression_skip_comment() { + parse_all( + function_definition(false), + vec![ + "fn main( + // This comment should be skipped + x : Field, + // And this one + y : Field, + ) { + }", + "fn main(x : Field, y : Field,) { + foo::bar( + // Comment for x argument + x, + // Comment for y argument + y + ) + }", + ], + ); + } + + #[test] + fn parse_function() { + parse_all( + function_definition(false), + vec![ + "fn func_name() {}", + "fn f(foo: pub u8, y : pub Field) -> u8 { x + a }", + "fn f(f: pub Field, y : Field, z : Field) -> u8 { x + a }", + "fn func_name(f: Field, y : pub Field, z : pub [u8;5],) {}", + "fn f(f: pub Field, y : Field, z : Field) -> u8 { x + a }", + "fn f(f: pub Field, y : T, z : Field) -> u8 { x + a }", + "fn func_name(x: [Field], y : [Field;2],y : pub [Field;2], z : pub [u8;5]) {}", + "fn main(x: pub u8, y: pub u8) -> distinct pub [u8; 2] { [x, y] }", + "fn f(f: pub Field, y : Field, z : comptime Field) -> u8 { x + a }", + "fn f(f: pub Field, y : T, z : comptime Field) -> u8 { x + a }", + "fn func_name(f: Field, y : T) where T: SomeTrait {}", + "fn func_name(f: Field, y : T) where T: SomeTrait + SomeTrait2 {}", + "fn func_name(f: Field, y : T) where T: SomeTrait, T: SomeTrait2 {}", + "fn func_name(f: Field, y : T) where T: SomeTrait + SomeTrait2 {}", + "fn func_name(f: Field, y : T) where T: SomeTrait + SomeTrait2 {}", + "fn func_name(f: Field, y : T) where T: SomeTrait + SomeTrait2 {}", + "fn func_name(f: 
Field, y : T) where T: SomeTrait + SomeTrait2 {}", + "fn func_name(f: Field, y : T) where T: SomeTrait + SomeTrait2 + TraitY {}", + "fn func_name(f: Field, y : T, z : U) where SomeStruct: SomeTrait {}", + // 'where u32: SomeTrait' is allowed in Rust. + // It will result in compiler error in case SomeTrait isn't implemented for u32. + "fn func_name(f: Field, y : T) where u32: SomeTrait {}", + // A trailing plus is allowed by Rust, so we support it as well. + "fn func_name(f: Field, y : T) where T: SomeTrait + {}", + // The following should produce compile error on later stage. From the parser's perspective it's fine + "fn func_name(f: Field, y : Field, z : Field) where T: SomeTrait {}", + ], + ); + + parse_all_failing( + function_definition(false), + vec![ + "fn x2( f: []Field,,) {}", + "fn ( f: []Field) {}", + "fn ( f: []Field) {}", + // TODO: Check for more specific error messages + "fn func_name(f: Field, y : pub Field, z : pub [u8;5],) where T: {}", + "fn func_name(f: Field, y : pub Field, z : pub [u8;5],) where SomeTrait {}", + "fn func_name(f: Field, y : pub Field, z : pub [u8;5],) SomeTrait {}", + // A leading plus is not allowed. + "fn func_name(f: Field, y : T) where T: + SomeTrait {}", + "fn func_name(f: Field, y : T) where T: TraitX + {}", + ], + ); + } +} diff --git a/compiler/noirc_frontend/src/parser/parser/lambdas.rs b/compiler/noirc_frontend/src/parser/parser/lambdas.rs new file mode 100644 index 00000000000..48ddd41ab44 --- /dev/null +++ b/compiler/noirc_frontend/src/parser/parser/lambdas.rs @@ -0,0 +1,42 @@ +use chumsky::{primitive::just, Parser}; + +use crate::{ + parser::{labels::ParsingRuleLabel, parameter_name_recovery, parameter_recovery, NoirParser}, + token::Token, + Expression, ExpressionKind, Lambda, Pattern, UnresolvedType, +}; + +use super::{parse_type, pattern}; + +pub(super) fn lambda<'a>( + expr_parser: impl NoirParser + 'a, +) -> impl NoirParser + 'a { + lambda_parameters() + .delimited_by(just(Token::Pipe), just(Token::Pipe)) + .then(lambda_return_type()) + .then(expr_parser) + .map(|((parameters, return_type), body)| { + ExpressionKind::Lambda(Box::new(Lambda { parameters, return_type, body })) + }) +} + +fn lambda_parameters() -> impl NoirParser> { + let typ = parse_type().recover_via(parameter_recovery()); + let typ = just(Token::Colon).ignore_then(typ); + + let parameter = pattern() + .recover_via(parameter_name_recovery()) + .then(typ.or_not().map(|typ| typ.unwrap_or_else(UnresolvedType::unspecified))); + + parameter + .separated_by(just(Token::Comma)) + .allow_trailing() + .labelled(ParsingRuleLabel::Parameter) +} + +fn lambda_return_type() -> impl NoirParser { + just(Token::Arrow) + .ignore_then(parse_type()) + .or_not() + .map(|ret| ret.unwrap_or_else(UnresolvedType::unspecified)) +} diff --git a/compiler/noirc_frontend/src/parser/parser/literals.rs b/compiler/noirc_frontend/src/parser/parser/literals.rs new file mode 100644 index 00000000000..32f4f03de2e --- /dev/null +++ b/compiler/noirc_frontend/src/parser/parser/literals.rs @@ -0,0 +1,157 @@ +use chumsky::Parser; + +use crate::{ + parser::NoirParser, + token::{Token, TokenKind}, + ExpressionKind, +}; + +use super::primitives::token_kind; + +pub(super) fn literal() -> impl NoirParser { + token_kind(TokenKind::Literal).map(|token| match token { + Token::Int(x) => ExpressionKind::integer(x), + Token::Bool(b) => ExpressionKind::boolean(b), + Token::Str(s) => ExpressionKind::string(s), + Token::RawStr(s, hashes) => ExpressionKind::raw_string(s, hashes), + Token::FmtStr(s) => 
ExpressionKind::format_string(s), + unexpected => unreachable!("Non-literal {} parsed as a literal", unexpected), + }) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::parser::parser::{ + expression, expression_no_constructors, fresh_statement, term, test_helpers::*, + }; + use crate::Literal; + + fn expr_to_lit(expr: ExpressionKind) -> Literal { + match expr { + ExpressionKind::Literal(literal) => literal, + _ => unreachable!("expected a literal"), + } + } + + #[test] + fn parse_int() { + let int = parse_with(literal(), "5").unwrap(); + let hex = parse_with(literal(), "0x05").unwrap(); + + match (expr_to_lit(int), expr_to_lit(hex)) { + (Literal::Integer(int, false), Literal::Integer(hex, false)) => assert_eq!(int, hex), + _ => unreachable!(), + } + } + + #[test] + fn parse_string() { + let expr = parse_with(literal(), r#""hello""#).unwrap(); + match expr_to_lit(expr) { + Literal::Str(s) => assert_eq!(s, "hello"), + _ => unreachable!(), + }; + } + + #[test] + fn parse_bool() { + let expr_true = parse_with(literal(), "true").unwrap(); + let expr_false = parse_with(literal(), "false").unwrap(); + + match (expr_to_lit(expr_true), expr_to_lit(expr_false)) { + (Literal::Bool(t), Literal::Bool(f)) => { + assert!(t); + assert!(!f); + } + _ => unreachable!(), + }; + } + + #[test] + fn parse_unary() { + parse_all( + term(expression(), expression_no_constructors(expression()), fresh_statement(), true), + vec!["!hello", "-hello", "--hello", "-!hello", "!-hello"], + ); + parse_all_failing( + term(expression(), expression_no_constructors(expression()), fresh_statement(), true), + vec!["+hello", "/hello"], + ); + } + + #[test] + fn parse_raw_string_expr() { + let cases = vec![ + Case { source: r#" r"foo" "#, expect: r#"r"foo""#, errors: 0 }, + Case { source: r##" r#"foo"# "##, expect: r##"r#"foo"#"##, errors: 0 }, + // backslash + Case { source: r#" r"\\" "#, expect: r#"r"\\""#, errors: 0 }, + Case { source: r##" r#"\"# "##, expect: r##"r#"\"#"##, errors: 0 }, + Case { source: r##" r#"\\"# "##, expect: r##"r#"\\"#"##, errors: 0 }, + Case { source: r##" r#"\\\"# "##, expect: r##"r#"\\\"#"##, errors: 0 }, + // escape sequence + Case { + source: r##" r#"\t\n\\t\\n\\\t\\\n\\\\"# "##, + expect: r##"r#"\t\n\\t\\n\\\t\\\n\\\\"#"##, + errors: 0, + }, + Case { source: r##" r#"\\\\\\\\"# "##, expect: r##"r#"\\\\\\\\"#"##, errors: 0 }, + // mismatch - errors: + Case { source: r###" r#"foo"## "###, expect: r##"r#"foo"#"##, errors: 1 }, + Case { source: r##" r##"foo"# "##, expect: "(none)", errors: 2 }, + // mismatch: short: + Case { source: r##" r"foo"# "##, expect: r#"r"foo""#, errors: 1 }, + Case { source: r#" r#"foo" "#, expect: "(none)", errors: 2 }, + // empty string + Case { source: r#"r"""#, expect: r#"r"""#, errors: 0 }, + Case { source: r####"r###""###"####, expect: r####"r###""###"####, errors: 0 }, + // miscellaneous + Case { source: r##" r#\"foo\"# "##, expect: "plain::r", errors: 2 }, + Case { source: r#" r\"foo\" "#, expect: "plain::r", errors: 1 }, + Case { source: r##" r##"foo"# "##, expect: "(none)", errors: 2 }, + // missing 'r' letter + Case { source: r##" ##"foo"# "##, expect: r#""foo""#, errors: 2 }, + Case { source: r#" #"foo" "#, expect: "plain::foo", errors: 2 }, + // whitespace + Case { source: r##" r #"foo"# "##, expect: "plain::r", errors: 2 }, + Case { source: r##" r# "foo"# "##, expect: "plain::r", errors: 3 }, + Case { source: r#" r#"foo" # "#, expect: "(none)", errors: 2 }, + // after identifier + Case { source: r##" bar#"foo"# "##, expect: "plain::bar", errors: 2 }, + // 
nested + Case { + source: r###"r##"foo r#"bar"# r"baz" ### bye"##"###, + expect: r###"r##"foo r#"bar"# r"baz" ### bye"##"###, + errors: 0, + }, + ]; + + check_cases_with_errors(&cases[..], expression()); + } + + #[test] + fn parse_raw_string_lit() { + let lit_cases = vec![ + Case { source: r#" r"foo" "#, expect: r#"r"foo""#, errors: 0 }, + Case { source: r##" r#"foo"# "##, expect: r##"r#"foo"#"##, errors: 0 }, + // backslash + Case { source: r#" r"\\" "#, expect: r#"r"\\""#, errors: 0 }, + Case { source: r##" r#"\"# "##, expect: r##"r#"\"#"##, errors: 0 }, + Case { source: r##" r#"\\"# "##, expect: r##"r#"\\"#"##, errors: 0 }, + Case { source: r##" r#"\\\"# "##, expect: r##"r#"\\\"#"##, errors: 0 }, + // escape sequence + Case { + source: r##" r#"\t\n\\t\\n\\\t\\\n\\\\"# "##, + expect: r##"r#"\t\n\\t\\n\\\t\\\n\\\\"#"##, + errors: 0, + }, + Case { source: r##" r#"\\\\\\\\"# "##, expect: r##"r#"\\\\\\\\"#"##, errors: 0 }, + // mismatch - errors: + Case { source: r###" r#"foo"## "###, expect: r##"r#"foo"#"##, errors: 1 }, + Case { source: r##" r##"foo"# "##, expect: "(none)", errors: 2 }, + ]; + + check_cases_with_errors(&lit_cases[..], literal()); + } +} diff --git a/compiler/noirc_frontend/src/parser/parser/path.rs b/compiler/noirc_frontend/src/parser/parser/path.rs new file mode 100644 index 00000000000..ab812c07dce --- /dev/null +++ b/compiler/noirc_frontend/src/parser/parser/path.rs @@ -0,0 +1,78 @@ +use crate::parser::NoirParser; +use crate::{Path, PathKind}; + +use crate::token::{Keyword, Token}; + +use chumsky::prelude::*; + +use super::{ident, keyword}; + +pub(super) fn path() -> impl NoirParser { + let idents = || ident().separated_by(just(Token::DoubleColon)).at_least(1); + let make_path = |kind| move |segments, span| Path { segments, kind, span }; + + let prefix = |key| keyword(key).ignore_then(just(Token::DoubleColon)); + let path_kind = |key, kind| prefix(key).ignore_then(idents()).map_with_span(make_path(kind)); + + choice(( + path_kind(Keyword::Crate, PathKind::Crate), + path_kind(Keyword::Dep, PathKind::Dep), + idents().map_with_span(make_path(PathKind::Plain)), + )) +} + +fn empty_path() -> impl NoirParser { + let make_path = |kind| move |_, span| Path { segments: Vec::new(), kind, span }; + let path_kind = |key, kind| keyword(key).map_with_span(make_path(kind)); + + choice((path_kind(Keyword::Crate, PathKind::Crate), path_kind(Keyword::Dep, PathKind::Dep))) +} + +pub(super) fn maybe_empty_path() -> impl NoirParser { + path().or(empty_path()) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::parser::parser::test_helpers::{parse_all_failing, parse_with}; + + #[test] + fn parse_path() { + let cases = vec![ + ("std", vec!["std"]), + ("std::hash", vec!["std", "hash"]), + ("std::hash::collections", vec!["std", "hash", "collections"]), + ("dep::foo::bar", vec!["foo", "bar"]), + ("crate::std::hash", vec!["std", "hash"]), + ]; + + for (src, expected_segments) in cases { + let path: Path = parse_with(path(), src).unwrap(); + for (segment, expected) in path.segments.into_iter().zip(expected_segments) { + assert_eq!(segment.0.contents, expected); + } + } + + parse_all_failing(path(), vec!["std::", "::std", "std::hash::", "foo::1"]); + } + + #[test] + fn parse_path_kinds() { + let cases = vec![ + ("std", PathKind::Plain), + ("dep::hash::collections", PathKind::Dep), + ("crate::std::hash", PathKind::Crate), + ]; + + for (src, expected_path_kind) in cases { + let path = parse_with(path(), src).unwrap(); + assert_eq!(path.kind, expected_path_kind); + } + + parse_all_failing( + 
path(), + vec!["dep", "crate", "crate::std::crate", "foo::bar::crate", "foo::dep"], + ); + } +} diff --git a/compiler/noirc_frontend/src/parser/parser/primitives.rs b/compiler/noirc_frontend/src/parser/parser/primitives.rs new file mode 100644 index 00000000000..34927278038 --- /dev/null +++ b/compiler/noirc_frontend/src/parser/parser/primitives.rs @@ -0,0 +1,101 @@ +use chumsky::prelude::*; + +use crate::{ + parser::{labels::ParsingRuleLabel, ExprParser, NoirParser, ParserError}, + token::{Keyword, Token, TokenKind}, + ExpressionKind, Ident, UnaryOp, +}; + +use super::path; + +/// This parser always parses no input and fails +pub(super) fn nothing() -> impl NoirParser { + one_of([]).map(|_| unreachable!("parser should always error")) +} + +pub(super) fn keyword(keyword: Keyword) -> impl NoirParser { + just(Token::Keyword(keyword)) +} + +pub(super) fn token_kind(token_kind: TokenKind) -> impl NoirParser { + filter_map(move |span, found: Token| { + if found.kind() == token_kind { + Ok(found) + } else { + Err(ParserError::expected_label( + ParsingRuleLabel::TokenKind(token_kind.clone()), + found, + span, + )) + } + }) +} + +pub(super) fn ident() -> impl NoirParser { + token_kind(TokenKind::Ident).map_with_span(Ident::from_token) +} + +// Right-shift (>>) is issued as two separate > tokens by the lexer as this makes it easier +// to parse nested generic types. For normal expressions however, it means we have to manually +// parse two greater-than tokens as a single right-shift here. +pub(super) fn right_shift_operator() -> impl NoirParser { + just(Token::Greater).then(just(Token::Greater)).to(Token::ShiftRight) +} + +pub(super) fn not
<P>
(term_parser: P) -> impl NoirParser +where + P: ExprParser, +{ + just(Token::Bang).ignore_then(term_parser).map(|rhs| ExpressionKind::prefix(UnaryOp::Not, rhs)) +} + +pub(super) fn negation
<P>
(term_parser: P) -> impl NoirParser +where + P: ExprParser, +{ + just(Token::Minus) + .ignore_then(term_parser) + .map(|rhs| ExpressionKind::prefix(UnaryOp::Minus, rhs)) +} + +pub(super) fn mutable_reference
<P>
(term_parser: P) -> impl NoirParser +where + P: ExprParser, +{ + just(Token::Ampersand) + .ignore_then(keyword(Keyword::Mut)) + .ignore_then(term_parser) + .map(|rhs| ExpressionKind::prefix(UnaryOp::MutableReference, rhs)) +} + +pub(super) fn dereference
<P>
(term_parser: P) -> impl NoirParser +where + P: ExprParser, +{ + just(Token::Star) + .ignore_then(term_parser) + .map(|rhs| ExpressionKind::prefix(UnaryOp::Dereference { implicitly_added: false }, rhs)) +} + +pub(super) fn variable() -> impl NoirParser { + path().map(ExpressionKind::Variable) +} + +#[cfg(test)] +mod test { + use crate::parser::parser::{ + expression, expression_no_constructors, fresh_statement, term, test_helpers::*, + }; + + #[test] + fn parse_unary() { + parse_all( + term(expression(), expression_no_constructors(expression()), fresh_statement(), true), + vec!["!hello", "-hello", "--hello", "-!hello", "!-hello"], + ); + parse_all_failing( + term(expression(), expression_no_constructors(expression()), fresh_statement(), true), + vec!["+hello", "/hello"], + ); + } +} diff --git a/compiler/noirc_frontend/src/parser/parser/structs.rs b/compiler/noirc_frontend/src/parser/parser/structs.rs new file mode 100644 index 00000000000..0212f56783f --- /dev/null +++ b/compiler/noirc_frontend/src/parser/parser/structs.rs @@ -0,0 +1,97 @@ +use chumsky::prelude::*; +use noirc_errors::Span; + +use crate::{ + macros_api::SecondaryAttribute, + parser::{ + parser::{ + attributes::attributes, + function, parse_type, + primitives::{ident, keyword}, + }, + NoirParser, ParserError, ParserErrorReason, TopLevelStatement, + }, + token::{Attribute, Keyword, Token}, + Ident, NoirStruct, UnresolvedType, +}; + +pub(super) fn struct_definition() -> impl NoirParser { + use self::Keyword::Struct; + use Token::*; + + let fields = struct_fields() + .delimited_by(just(LeftBrace), just(RightBrace)) + .recover_with(nested_delimiters( + LeftBrace, + RightBrace, + [(LeftParen, RightParen), (LeftBracket, RightBracket)], + |_| vec![], + )) + .or(just(Semicolon).to(Vec::new())); + + attributes() + .then_ignore(keyword(Struct)) + .then(ident()) + .then(function::generics()) + .then(fields) + .validate(|(((raw_attributes, name), generics), fields), span, emit| { + let attributes = validate_struct_attributes(raw_attributes, span, emit); + TopLevelStatement::Struct(NoirStruct { name, attributes, generics, fields, span }) + }) +} + +fn struct_fields() -> impl NoirParser> { + ident() + .then_ignore(just(Token::Colon)) + .then(parse_type()) + .separated_by(just(Token::Comma)) + .allow_trailing() +} + +fn validate_struct_attributes( + attributes: Vec, + span: Span, + emit: &mut dyn FnMut(ParserError), +) -> Vec { + let mut struct_attributes = vec![]; + + for attribute in attributes { + match attribute { + Attribute::Function(..) 
=> { + emit(ParserError::with_reason( + ParserErrorReason::NoFunctionAttributesAllowedOnStruct, + span, + )); + } + Attribute::Secondary(attr) => struct_attributes.push(attr), + } + } + + struct_attributes +} + +#[cfg(test)] +mod test { + use super::*; + use crate::parser::parser::test_helpers::*; + + #[test] + fn parse_structs() { + let cases = vec![ + "struct Foo;", + "struct Foo { }", + "struct Bar { ident: Field, }", + "struct Baz { ident: Field, other: Field }", + "#[attribute] struct Baz { ident: Field, other: Field }", + ]; + parse_all(struct_definition(), cases); + + let failing = vec![ + "struct { }", + "struct Foo { bar: pub Field }", + "struct Foo { bar: pub Field }", + "#[oracle(some)] struct Foo { bar: Field }", + ]; + parse_all_failing(struct_definition(), failing); + } +} diff --git a/compiler/noirc_frontend/src/parser/parser/test_helpers.rs b/compiler/noirc_frontend/src/parser/parser/test_helpers.rs new file mode 100644 index 00000000000..6b8cb80a0a0 --- /dev/null +++ b/compiler/noirc_frontend/src/parser/parser/test_helpers.rs @@ -0,0 +1,122 @@ +use chumsky::primitive::just; +use chumsky::Parser; +use iter_extended::vecmap; +use noirc_errors::CustomDiagnostic; + +use crate::{ + lexer::Lexer, + parser::{force, NoirParser}, + token::Token, +}; + +pub(crate) fn parse_with(parser: P, program: &str) -> Result> +where + P: NoirParser, +{ + let (tokens, lexer_errors) = Lexer::lex(program); + if !lexer_errors.is_empty() { + return Err(vecmap(lexer_errors, Into::into)); + } + parser.then_ignore(just(Token::EOF)).parse(tokens).map_err(|errors| vecmap(errors, Into::into)) +} + +pub(crate) fn parse_recover(parser: P, program: &str) -> (Option, Vec) +where + P: NoirParser, +{ + let (tokens, lexer_errors) = Lexer::lex(program); + let (opt, errs) = parser.then_ignore(force(just(Token::EOF))).parse_recovery(tokens); + + let mut errors = vecmap(lexer_errors, Into::into); + errors.extend(errs.into_iter().map(Into::into)); + + (opt, errors) +} + +pub(crate) fn parse_all(parser: P, programs: Vec<&str>) -> Vec +where + P: NoirParser, +{ + vecmap(programs, move |program| { + let message = format!("Failed to parse:\n{program}"); + let (op_t, diagnostics) = parse_recover(&parser, program); + diagnostics.iter().for_each(|diagnostic| { + if diagnostic.is_error() { + panic!("{} with error {}", &message, diagnostic); + } + }); + op_t.expect(&message) + }) +} + +pub(crate) fn parse_all_failing(parser: P, programs: Vec<&str>) -> Vec +where + P: NoirParser, + T: std::fmt::Display, +{ + programs + .into_iter() + .flat_map(|program| match parse_with(&parser, program) { + Ok(expr) => { + unreachable!( + "Expected this input to fail:\n{}\nYet it successfully parsed as:\n{}", + program, expr + ) + } + Err(diagnostics) => { + if diagnostics.iter().all(|diagnostic: &CustomDiagnostic| diagnostic.is_warning()) { + unreachable!( + "Expected at least one error when parsing:\n{}\nYet it successfully parsed without errors:\n", + program + ) + }; + diagnostics + } + }) + .collect() +} + +#[derive(Copy, Clone)] +pub(crate) struct Case { + pub(crate) source: &'static str, + pub(crate) errors: usize, + pub(crate) expect: &'static str, +} + +pub(crate) fn check_cases_with_errors(cases: &[Case], parser: P) +where + P: NoirParser + Clone, + T: std::fmt::Display, +{ + let show_errors = |v| vecmap(&v, ToString::to_string).join("\n"); + + let results = vecmap(cases, |&case| { + let (opt, errors) = parse_recover(parser.clone(), case.source); + let actual = opt.map(|ast| ast.to_string()); + let actual = if let Some(s) = &actual 
{ s.to_string() } else { "(none)".to_string() }; + + let result = ((errors.len(), actual.clone()), (case.errors, case.expect.to_string())); + if result.0 != result.1 { + let num_errors = errors.len(); + let shown_errors = show_errors(errors); + eprintln!( + concat!( + "\nExpected {expected_errors} error(s) and got {num_errors}:", + "\n\n{shown_errors}", + "\n\nFrom input: {src}", + "\nExpected AST: {expected_result}", + "\nActual AST: {actual}\n", + ), + expected_errors = case.errors, + num_errors = num_errors, + shown_errors = shown_errors, + src = case.source, + expected_result = case.expect, + actual = actual, + ); + } + result + }); + + assert_eq!(vecmap(&results, |t| t.0.clone()), vecmap(&results, |t| t.1.clone()),); +} diff --git a/compiler/noirc_frontend/src/parser/parser/traits.rs b/compiler/noirc_frontend/src/parser/parser/traits.rs new file mode 100644 index 00000000000..0d72fbd5303 --- /dev/null +++ b/compiler/noirc_frontend/src/parser/parser/traits.rs @@ -0,0 +1,217 @@ +use chumsky::prelude::*; + +use super::{ + block, expression, fresh_statement, function, function_declaration_parameters, + function_return_type, +}; + +use crate::{ + parser::{ + ignore_then_commit, parenthesized, parser::primitives::keyword, NoirParser, ParserError, + ParserErrorReason, TopLevelStatement, + }, + token::{Keyword, Token}, + Expression, FunctionVisibility, NoirTrait, NoirTraitImpl, TraitBound, TraitImplItem, TraitItem, + UnresolvedTraitConstraint, UnresolvedType, +}; + +use super::{generic_type_args, parse_type, path, primitives::ident}; + +pub(super) fn trait_definition() -> impl NoirParser { + keyword(Keyword::Trait) + .ignore_then(ident()) + .then(function::generics()) + .then(where_clause()) + .then_ignore(just(Token::LeftBrace)) + .then(trait_body()) + .then_ignore(just(Token::RightBrace)) + .map_with_span(|(((name, generics), where_clause), items), span| { + TopLevelStatement::Trait(NoirTrait { name, generics, where_clause, span, items }) + }) +} + +fn trait_body() -> impl NoirParser> { + trait_function_declaration() + .or(trait_type_declaration()) + .or(trait_constant_declaration()) + .repeated() +} + +fn optional_default_value() -> impl NoirParser> { + ignore_then_commit(just(Token::Assign), expression()).or_not() +} + +fn trait_constant_declaration() -> impl NoirParser { + keyword(Keyword::Let) + .ignore_then(ident()) + .then_ignore(just(Token::Colon)) + .then(parse_type()) + .then(optional_default_value()) + .then_ignore(just(Token::Semicolon)) + .validate(|((name, typ), default_value), span, emit| { + emit(ParserError::with_reason( + ParserErrorReason::ExperimentalFeature("Associated constants"), + span, + )); + TraitItem::Constant { name, typ, default_value } + }) +} + +/// trait_function_declaration: 'fn' ident generics '(' declaration_parameters ')' function_return_type +fn trait_function_declaration() -> impl NoirParser { + let trait_function_body_or_semicolon = + block(fresh_statement()).map(Option::from).or(just(Token::Semicolon).to(Option::None)); + + keyword(Keyword::Fn) + .ignore_then(ident()) + .then(function::generics()) + .then(parenthesized(function_declaration_parameters())) + .then(function_return_type().map(|(_, typ)| typ)) + .then(where_clause()) + .then(trait_function_body_or_semicolon) + .map(|(((((name, generics), parameters), return_type), where_clause), body)| { + TraitItem::Function { name, generics, parameters, return_type, where_clause, body } + }) +} + +/// trait_type_declaration: 'type' ident generics +fn trait_type_declaration() -> impl NoirParser { + 
keyword(Keyword::Type).ignore_then(ident()).then_ignore(just(Token::Semicolon)).validate( + |name, span, emit| { + emit(ParserError::with_reason( + ParserErrorReason::ExperimentalFeature("Associated types"), + span, + )); + TraitItem::Type { name } + }, + ) +} + +/// Parses a trait implementation, implementing a particular trait for a type. +/// This has a similar syntax to `implementation`, but the `for type` clause is required, +/// and an optional `where` clause is also useable. +/// +/// trait_implementation: 'impl' generics ident generic_args for type '{' trait_implementation_body '}' +pub(super) fn trait_implementation() -> impl NoirParser { + keyword(Keyword::Impl) + .ignore_then(function::generics()) + .then(path()) + .then(generic_type_args(parse_type())) + .then_ignore(keyword(Keyword::For)) + .then(parse_type()) + .then(where_clause()) + .then_ignore(just(Token::LeftBrace)) + .then(trait_implementation_body()) + .then_ignore(just(Token::RightBrace)) + .map(|args| { + let ((other_args, where_clause), items) = args; + let (((impl_generics, trait_name), trait_generics), object_type) = other_args; + + TopLevelStatement::TraitImpl(NoirTraitImpl { + impl_generics, + trait_name, + trait_generics, + object_type, + items, + where_clause, + }) + }) +} + +fn trait_implementation_body() -> impl NoirParser> { + let function = function::function_definition(true).validate(|mut f, span, emit| { + if f.def().is_internal + || f.def().is_unconstrained + || f.def().is_open + || f.def().visibility != FunctionVisibility::Private + { + emit(ParserError::with_reason(ParserErrorReason::TraitImplFunctionModifiers, span)); + } + // Trait impl functions are always public + f.def_mut().visibility = FunctionVisibility::Public; + TraitImplItem::Function(f) + }); + + let alias = keyword(Keyword::Type) + .ignore_then(ident()) + .then_ignore(just(Token::Assign)) + .then(parse_type()) + .then_ignore(just(Token::Semicolon)) + .map(|(name, alias)| TraitImplItem::Type { name, alias }); + + function.or(alias).repeated() +} + +fn where_clause() -> impl NoirParser> { + struct MultiTraitConstraint { + typ: UnresolvedType, + trait_bounds: Vec, + } + + let constraints = parse_type() + .then_ignore(just(Token::Colon)) + .then(trait_bounds()) + .map(|(typ, trait_bounds)| MultiTraitConstraint { typ, trait_bounds }); + + keyword(Keyword::Where) + .ignore_then(constraints.separated_by(just(Token::Comma))) + .or_not() + .map(|option| option.unwrap_or_default()) + .map(|x: Vec| { + let mut result: Vec = Vec::new(); + for constraint in x { + for bound in constraint.trait_bounds { + result.push(UnresolvedTraitConstraint { + typ: constraint.typ.clone(), + trait_bound: bound, + }); + } + } + result + }) +} + +fn trait_bounds() -> impl NoirParser> { + trait_bound().separated_by(just(Token::Plus)).at_least(1).allow_trailing() +} + +fn trait_bound() -> impl NoirParser { + path().then(generic_type_args(parse_type())).map(|(trait_path, trait_generics)| TraitBound { + trait_path, + trait_generics, + trait_id: None, + }) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::parser::parser::test_helpers::*; + + #[test] + fn parse_trait() { + parse_all( + trait_definition(), + vec![ + // Empty traits are legal in Rust and sometimes used as a way to whitelist certain types + // for a particular operation. 
Also known as `tag` or `marker` traits: + // https://stackoverflow.com/questions/71895489/what-is-the-purpose-of-defining-empty-impl-in-rust + "trait Empty {}", + "trait TraitWithDefaultBody { fn foo(self) {} }", + "trait TraitAcceptingMutableRef { fn foo(&mut self); }", + "trait TraitWithTypeBoundOperation { fn identity() -> Self; }", + "trait TraitWithAssociatedType { type Element; fn item(self, index: Field) -> Self::Element; }", + "trait TraitWithAssociatedConstant { let Size: Field; }", + "trait TraitWithAssociatedConstantWithDefaultValue { let Size: Field = 10; }", + "trait GenericTrait { fn elem(&mut self, index: Field) -> T; }", + "trait GenericTraitWithConstraints where T: SomeTrait { fn elem(self, index: Field) -> T; }", + "trait TraitWithMultipleGenericParams where A: SomeTrait, B: AnotherTrait { let Size: Field; fn zero() -> Self; }", + ], + ); + + parse_all_failing( + trait_definition(), + vec!["trait MissingBody", "trait WrongDelimiter { fn foo() -> u8, fn bar() -> u8 }"], + ); + } +} diff --git a/compiler/noirc_frontend/src/parser/parser/types.rs b/compiler/noirc_frontend/src/parser/parser/types.rs new file mode 100644 index 00000000000..572397d6527 --- /dev/null +++ b/compiler/noirc_frontend/src/parser/parser/types.rs @@ -0,0 +1,172 @@ +use super::{ + expression_with_precedence, keyword, nothing, parenthesized, NoirParser, ParserError, + ParserErrorReason, Precedence, +}; +use crate::ast::{UnresolvedType, UnresolvedTypeData}; + +use crate::parser::labels::ParsingRuleLabel; +use crate::token::{Keyword, Token}; +use crate::{Recoverable, UnresolvedTypeExpression}; + +use chumsky::prelude::*; +use noirc_errors::Span; + +fn maybe_comp_time() -> impl NoirParser<()> { + keyword(Keyword::CompTime).or_not().validate(|opt, span, emit| { + if opt.is_some() { + emit(ParserError::with_reason(ParserErrorReason::ComptimeDeprecated, span)); + } + }) +} + +pub(super) fn parenthesized_type( + recursive_type_parser: impl NoirParser, +) -> impl NoirParser { + recursive_type_parser + .delimited_by(just(Token::LeftParen), just(Token::RightParen)) + .map_with_span(|typ, span| UnresolvedType { + typ: UnresolvedTypeData::Parenthesized(Box::new(typ)), + span: span.into(), + }) +} + +pub(super) fn field_type() -> impl NoirParser { + maybe_comp_time() + .then_ignore(keyword(Keyword::Field)) + .map_with_span(|_, span| UnresolvedTypeData::FieldElement.with_span(span)) +} + +pub(super) fn bool_type() -> impl NoirParser { + maybe_comp_time() + .then_ignore(keyword(Keyword::Bool)) + .map_with_span(|_, span| UnresolvedTypeData::Bool.with_span(span)) +} + +pub(super) fn string_type() -> impl NoirParser { + keyword(Keyword::String) + .ignore_then( + type_expression().delimited_by(just(Token::Less), just(Token::Greater)).or_not(), + ) + .map_with_span(|expr, span| UnresolvedTypeData::String(expr).with_span(span)) +} + +pub(super) fn format_string_type( + type_parser: impl NoirParser, +) -> impl NoirParser { + keyword(Keyword::FormatString) + .ignore_then( + type_expression() + .then_ignore(just(Token::Comma)) + .then(type_parser) + .delimited_by(just(Token::Less), just(Token::Greater)), + ) + .map_with_span(|(size, fields), span| { + UnresolvedTypeData::FormatString(size, Box::new(fields)).with_span(span) + }) +} + +pub(super) fn int_type() -> impl NoirParser { + maybe_comp_time() + .then(filter_map(|span, token: Token| match token { + Token::IntType(int_type) => Ok(int_type), + unexpected => { + Err(ParserError::expected_label(ParsingRuleLabel::IntegerType, unexpected, span)) + } + })) + .validate(|(_, 
token), span, emit| { + UnresolvedTypeData::from_int_token(token) + .map(|data| data.with_span(span)) + .unwrap_or_else(|err| { + emit(ParserError::with_reason(ParserErrorReason::InvalidBitSize(err.0), span)); + UnresolvedType::error(span) + }) + }) +} + +pub(super) fn array_type( + type_parser: impl NoirParser, +) -> impl NoirParser { + just(Token::LeftBracket) + .ignore_then(type_parser) + .then(just(Token::Semicolon).ignore_then(type_expression()).or_not()) + .then_ignore(just(Token::RightBracket)) + .map_with_span(|(element_type, size), span| { + UnresolvedTypeData::Array(size, Box::new(element_type)).with_span(span) + }) +} + +pub(super) fn type_expression() -> impl NoirParser { + recursive(|expr| { + expression_with_precedence( + Precedence::lowest_type_precedence(), + expr, + nothing(), + nothing(), + true, + false, + ) + }) + .labelled(ParsingRuleLabel::TypeExpression) + .try_map(UnresolvedTypeExpression::from_expr) +} + +pub(super) fn tuple_type(type_parser: T) -> impl NoirParser +where + T: NoirParser, +{ + let fields = type_parser.separated_by(just(Token::Comma)).allow_trailing(); + parenthesized(fields).map_with_span(|fields, span| { + if fields.is_empty() { + UnresolvedTypeData::Unit.with_span(span) + } else { + UnresolvedTypeData::Tuple(fields).with_span(span) + } + }) +} + +pub(super) fn function_type(type_parser: T) -> impl NoirParser +where + T: NoirParser, +{ + let args = parenthesized(type_parser.clone().separated_by(just(Token::Comma)).allow_trailing()); + + let env = just(Token::LeftBracket) + .ignore_then(type_parser.clone()) + .then_ignore(just(Token::RightBracket)) + .or_not() + .map_with_span(|t, span| { + t.unwrap_or_else(|| UnresolvedTypeData::Unit.with_span(Span::empty(span.end()))) + }); + + keyword(Keyword::Fn) + .ignore_then(env) + .then(args) + .then_ignore(just(Token::Arrow)) + .then(type_parser) + .map_with_span(|((env, args), ret), span| { + UnresolvedTypeData::Function(args, Box::new(ret), Box::new(env)).with_span(span) + }) +} + +pub(super) fn mutable_reference_type(type_parser: T) -> impl NoirParser +where + T: NoirParser, +{ + just(Token::Ampersand) + .ignore_then(keyword(Keyword::Mut)) + .ignore_then(type_parser) + .map_with_span(|element, span| { + UnresolvedTypeData::MutableReference(Box::new(element)).with_span(span) + }) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::parser::parser::test_helpers::*; + + #[test] + fn parse_type_expression() { + parse_all(type_expression(), vec!["(123)", "123", "(1 + 1)", "(1 + (1))"]); + } +} diff --git a/compiler/noirc_frontend/src/resolve_locations.rs b/compiler/noirc_frontend/src/resolve_locations.rs index cfb88966b9d..b5f1b1d0c64 100644 --- a/compiler/noirc_frontend/src/resolve_locations.rs +++ b/compiler/noirc_frontend/src/resolve_locations.rs @@ -212,6 +212,8 @@ impl NodeInterner { self.type_alias_ref .iter() .find(|(_, named_type_location)| named_type_location.span.contains(&location.span)) - .map(|(type_alias_id, _found_location)| self.get_type_alias(*type_alias_id).location) + .map(|(type_alias_id, _found_location)| { + self.get_type_alias(*type_alias_id).borrow().location + }) } } diff --git a/compiler/noirc_frontend/src/tests.rs b/compiler/noirc_frontend/src/tests.rs index 9ccbddab9ec..c661cc92eef 100644 --- a/compiler/noirc_frontend/src/tests.rs +++ b/compiler/noirc_frontend/src/tests.rs @@ -52,10 +52,12 @@ mod test { ) -> (ParsedModule, Context, Vec<(CompilationError, FileId)>) { let root = std::path::Path::new("/"); let fm = FileManager::new(root); + let mut context = Context::new(fm, 
Default::default()); context.def_interner.populate_dummy_operator_traits(); let root_file_id = FileId::dummy(); let root_crate_id = context.crate_graph.add_crate_root(root_file_id); + let (program, parser_errors) = parse_program(src); let mut errors = vecmap(parser_errors, |e| (e.into(), root_file_id)); remove_experimental_warnings(&mut errors); @@ -954,7 +956,7 @@ mod test { #[test] fn resolve_for_expr() { let src = r#" - fn main(x : Field) { + fn main(x : u64) { for i in 1..20 { let _z = x + i; }; @@ -1126,9 +1128,9 @@ mod test { } fn check_rewrite(src: &str, expected: &str) { - let (_program, context, _errors) = get_program(src); + let (_program, mut context, _errors) = get_program(src); let main_func_id = context.def_interner.find_function("main").unwrap(); - let program = monomorphize(main_func_id, &context.def_interner); + let program = monomorphize(main_func_id, &mut context.def_interner).unwrap(); assert!(format!("{}", program) == expected); } @@ -1162,4 +1164,46 @@ fn lambda$f1(mut env$l1: (Field)) -> Field { "#; check_rewrite(src, expected_rewrite); } + + #[test] + fn deny_cyclic_structs() { + let src = r#" + struct Foo { bar: Bar } + struct Bar { foo: Foo } + fn main() {} + "#; + assert_eq!(get_program_errors(src).len(), 1); + } + + #[test] + fn deny_cyclic_globals() { + let src = r#" + global A = B; + global B = A; + fn main() {} + "#; + assert_eq!(get_program_errors(src).len(), 1); + } + + #[test] + fn deny_cyclic_type_aliases() { + let src = r#" + type A = B; + type B = A; + fn main() {} + "#; + assert_eq!(get_program_errors(src).len(), 1); + } + + #[test] + fn ensure_nested_type_aliases_type_check() { + let src = r#" + type A = B; + type B = u8; + fn main() { + let _a: A = 0 as u16; + } + "#; + assert_eq!(get_program_errors(src).len(), 1); + } } diff --git a/compiler/noirc_printable_type/src/lib.rs b/compiler/noirc_printable_type/src/lib.rs index 18f2fe0a873..24f4f275a14 100644 --- a/compiler/noirc_printable_type/src/lib.rs +++ b/compiler/noirc_printable_type/src/lib.rs @@ -6,7 +6,7 @@ use regex::{Captures, Regex}; use serde::{Deserialize, Serialize}; use thiserror::Error; -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(tag = "kind", rename_all = "lowercase")] pub enum PrintableType { Field, @@ -50,6 +50,7 @@ pub enum PrintableValue { String(String), Vec(Vec<PrintableValue>), Struct(BTreeMap<String, PrintableValue>), + Other, } /// In order to display a `PrintableValue` we need a `PrintableType` to accurately @@ -69,6 +70,9 @@ pub enum ForeignCallError { #[error("Failed calling external resolver. {0}")] ExternalResolverError(#[from] jsonrpc::Error), + + #[error("Assert message resolved after an unsatisfied constrain. 
{0}")] + ResolvedAssertMessage(String), } impl TryFrom<&[ForeignCallParam]> for PrintableValueDisplay { @@ -293,7 +297,7 @@ fn format_field_string(field: FieldElement) -> String { } /// Assumes that `field_iterator` contains enough [FieldElement] in order to decode the [PrintableType] -fn decode_value( +pub fn decode_value( field_iterator: &mut impl Iterator, typ: &PrintableType, ) -> PrintableValue { diff --git a/compiler/utils/arena/Cargo.toml b/compiler/utils/arena/Cargo.toml index e82201a2cf4..41c6ebc9a8b 100644 --- a/compiler/utils/arena/Cargo.toml +++ b/compiler/utils/arena/Cargo.toml @@ -4,8 +4,3 @@ version.workspace = true authors.workspace = true edition.workspace = true license.workspace = true - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -generational-arena = "0.2.8" diff --git a/compiler/utils/arena/src/lib.rs b/compiler/utils/arena/src/lib.rs index fc19f44ab6e..2d117304e16 100644 --- a/compiler/utils/arena/src/lib.rs +++ b/compiler/utils/arena/src/lib.rs @@ -3,5 +3,89 @@ #![warn(unreachable_pub)] #![warn(clippy::semicolon_if_nothing_returned)] -// For now we use a wrapper around generational-arena -pub use generational_arena::{Arena, Index}; +#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd, Hash)] +pub struct Index(usize); + +impl Index { + #[cfg(test)] + pub fn test_new(index: usize) -> Index { + Self(index) + } + + /// Return a dummy index (max value internally). + /// This should be avoided over `Option` if possible. + pub fn dummy() -> Self { + Self(usize::MAX) + } + + /// Return the zeroed index. This is unsafe since we don't know + /// if this is a valid index for any particular map yet. + pub fn unsafe_zeroed() -> Self { + Self(0) + } +} + +#[derive(Clone, Debug)] +pub struct Arena { + pub vec: Vec, +} + +impl Default for Arena { + fn default() -> Self { + Self { vec: Vec::new() } + } +} + +impl core::ops::Index for Arena { + type Output = T; + + fn index(&self, index: Index) -> &Self::Output { + self.vec.index(index.0) + } +} + +impl core::ops::IndexMut for Arena { + fn index_mut(&mut self, index: Index) -> &mut Self::Output { + self.vec.index_mut(index.0) + } +} + +impl IntoIterator for Arena { + type Item = T; + + type IntoIter = as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.vec.into_iter() + } +} + +impl<'a, T> IntoIterator for &'a Arena { + type Item = &'a T; + + type IntoIter = <&'a Vec as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.vec.iter() + } +} + +impl Arena { + pub fn insert(&mut self, item: T) -> Index { + let index = self.vec.len(); + self.vec.push(item); + Index(index) + } + + pub fn get(&self, index: Index) -> Option<&T> { + self.vec.get(index.0) + } + + pub fn get_mut(&mut self, index: Index) -> Option<&mut T> { + self.vec.get_mut(index.0) + } + + pub fn iter(&self) -> impl Iterator { + self.vec.iter().enumerate().map(|(index, item)| (Index(index), item)) + } +} diff --git a/compiler/wasm/.eslintignore b/compiler/wasm/.eslintignore index 3c3629e647f..76add878f8d 100644 --- a/compiler/wasm/.eslintignore +++ b/compiler/wasm/.eslintignore @@ -1 +1,2 @@ node_modules +dist \ No newline at end of file diff --git a/compiler/wasm/README.md b/compiler/wasm/README.md index 0b2d92b0815..52f7e83e19e 100644 --- a/compiler/wasm/README.md +++ b/compiler/wasm/README.md @@ -1,9 +1,32 @@ # Noir Lang WASM JavaScript Package -This JavaScript package enables users to compile a Noir program, i.e. generating its artifacts. 
+This JavaScript package enables users to compile a Noir program, i.e. generate its artifacts, in both Node.js environments and the browser. The package also handles dependency management, similarly to how Nargo (Noir's CLI tool) does, but it is used only for compilation, not for proving, verifying, or simulating functions. +## Usage + +```typescript +// Node.js + +import { compile, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager(myProjectPath); +const myCompiledCode = await compile(fm); +``` + +```typescript +// Browser + +import { compile, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager('/'); +for (const path of files) { + await fm.writeFile(path, await getFileAsStream(path)); +} +const myCompiledCode = await compile(fm); +``` + ## Building from source Outside of the [noir repo](https://github.com/noir-lang/noir), this package can be built using the command below: diff --git a/compiler/wasm/package.json b/compiler/wasm/package.json index 2aaf4a494df..67584a2def1 100644 --- a/compiler/wasm/package.json +++ b/compiler/wasm/package.json @@ -3,7 +3,7 @@ "contributors": [ "The Noir Team " ], - "version": "0.23.0", + "version": "0.24.0", "license": "(MIT OR Apache-2.0)", "main": "dist/main.js", "types": "./dist/types/src/index.d.cts", @@ -44,7 +44,6 @@ "devDependencies": { "@esm-bundle/chai": "^4.3.4-fix.0", "@ltd/j-toml": "^1.38.0", - "@noir-lang/noirc_abi": "workspace:*", "@types/adm-zip": "^0.5.0", "@types/chai": "^4", "@types/mocha": "^10.0.6", @@ -62,12 +61,15 @@ "assert": "^2.1.0", "browserify-fs": "^1.0.0", "chai": "^4.3.10", - "copy-webpack-plugin": "^11.0.0", - "html-webpack-plugin": "^5.5.4", + "copy-webpack-plugin": "^12.0.2", + "eslint": "^8.56.0", + "eslint-plugin-prettier": "^5.0.0", + "html-webpack-plugin": "^5.6.0", "memfs": "^4.6.0", "mocha": "^10.2.0", "mocha-each": "^2.0.1", "path-browserify": "^1.0.1", + "prettier": "3.0.3", "process": "^0.11.10", "readable-stream": "^4.4.2", "sinon": "^17.0.1", @@ -76,10 +78,12 @@ "typescript": "~5.2.2", "unzipit": "^1.4.3", "url": "^0.11.3", - "webpack": "^5.49.0", - "webpack-cli": "^4.7.2" + "webpack": "^5.90.1", + "webpack-cli": "^5.1.4", + "webpack-dev-server": "^5.0.0" }, "dependencies": { + "@noir-lang/types": "workspace:*", "pako": "^2.1.0" } } diff --git a/compiler/wasm/scripts/install_wasm-pack.sh b/compiler/wasm/scripts/install_wasm-pack.sh index 28721e62fe2..b0fdffe0458 100755 --- a/compiler/wasm/scripts/install_wasm-pack.sh +++ b/compiler/wasm/scripts/install_wasm-pack.sh @@ -9,4 +9,4 @@ if [ $CARGO_BINSTALL_CHECK != "true" ]; then curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash fi -cargo-binstall wasm-pack@0.12.1 -y \ No newline at end of file +cargo-binstall wasm-pack@0.12.1 -y --force diff --git a/compiler/wasm/src/compile.rs b/compiler/wasm/src/compile.rs index b39a27a7931..9e6fca1126e 100644 --- a/compiler/wasm/src/compile.rs +++ b/compiler/wasm/src/compile.rs @@ -1,19 +1,21 @@ use fm::FileManager; use gloo_utils::format::JsValueSerdeExt; use js_sys::{JsString, Object}; -use nargo::artifacts::{ - contract::{ContractArtifact, ContractFunctionArtifact}, - program::ProgramArtifact, +use nargo::{ + artifacts::{ + contract::{ContractArtifact, ContractFunctionArtifact}, + program::ProgramArtifact, + }, + parse_all, }; use noirc_driver::{ - add_dep, compile_contract, compile_main, file_manager_with_stdlib, prepare_crate, - prepare_dependency, CompileOptions,
CompiledContract, CompiledProgram, + add_dep, file_manager_with_stdlib, prepare_crate, prepare_dependency, CompileOptions, NOIR_ARTIFACT_VERSION_STRING, }; use noirc_evaluator::errors::SsaReport; use noirc_frontend::{ graph::{CrateId, CrateName}, - hir::{def_map::parse_file, Context, ParsedFiles}, + hir::Context, }; use serde::Deserialize; use std::{collections::HashMap, path::Path}; @@ -60,51 +62,64 @@ extern "C" { #[derive(Clone, Debug, PartialEq, Eq)] pub type JsDependencyGraph; - #[wasm_bindgen(extends = Object, js_name = "CompileResult", typescript_type = "CompileResult")] + #[wasm_bindgen(extends = Object, js_name = "ProgramCompileResult", typescript_type = "ProgramCompileResult")] #[derive(Clone, Debug, PartialEq, Eq)] - pub type JsCompileResult; + pub type JsCompileProgramResult; #[wasm_bindgen(constructor, js_class = "Object")] - fn constructor() -> JsCompileResult; + fn constructor() -> JsCompileProgramResult; + + #[wasm_bindgen(extends = Object, js_name = "ContractCompileResult", typescript_type = "ContractCompileResult")] + #[derive(Clone, Debug, PartialEq, Eq)] + pub type JsCompileContractResult; + + #[wasm_bindgen(constructor, js_class = "Object")] + fn constructor() -> JsCompileContractResult; } -impl JsCompileResult { - const CONTRACT_PROP: &'static str = "contract"; +impl JsCompileProgramResult { const PROGRAM_PROP: &'static str = "program"; const WARNINGS_PROP: &'static str = "warnings"; - pub fn new(resp: CompileResult) -> JsCompileResult { - let obj = JsCompileResult::constructor(); - match resp { - CompileResult::Contract { contract, warnings } => { - js_sys::Reflect::set( - &obj, - &JsString::from(JsCompileResult::CONTRACT_PROP), - &::from_serde(&contract).unwrap(), - ) - .unwrap(); - js_sys::Reflect::set( - &obj, - &JsString::from(JsCompileResult::WARNINGS_PROP), - &::from_serde(&warnings).unwrap(), - ) - .unwrap(); - } - CompileResult::Program { program, warnings } => { - js_sys::Reflect::set( - &obj, - &JsString::from(JsCompileResult::PROGRAM_PROP), - &::from_serde(&program).unwrap(), - ) - .unwrap(); - js_sys::Reflect::set( - &obj, - &JsString::from(JsCompileResult::WARNINGS_PROP), - &::from_serde(&warnings).unwrap(), - ) - .unwrap(); - } - }; + pub fn new(program: ProgramArtifact, warnings: Vec) -> JsCompileProgramResult { + let obj = JsCompileProgramResult::constructor(); + + js_sys::Reflect::set( + &obj, + &JsString::from(JsCompileProgramResult::PROGRAM_PROP), + &::from_serde(&program).unwrap(), + ) + .unwrap(); + js_sys::Reflect::set( + &obj, + &JsString::from(JsCompileProgramResult::WARNINGS_PROP), + &::from_serde(&warnings).unwrap(), + ) + .unwrap(); + + obj + } +} + +impl JsCompileContractResult { + const CONTRACT_PROP: &'static str = "contract"; + const WARNINGS_PROP: &'static str = "warnings"; + + pub fn new(contract: ContractArtifact, warnings: Vec) -> JsCompileContractResult { + let obj = JsCompileContractResult::constructor(); + + js_sys::Reflect::set( + &obj, + &JsString::from(JsCompileContractResult::CONTRACT_PROP), + &::from_serde(&contract).unwrap(), + ) + .unwrap(); + js_sys::Reflect::set( + &obj, + &JsString::from(JsCompileContractResult::WARNINGS_PROP), + &::from_serde(&warnings).unwrap(), + ) + .unwrap(); obj } @@ -140,77 +155,98 @@ impl PathToFileSourceMap { } } -pub(crate) fn parse_all(fm: &FileManager) -> ParsedFiles { - fm.as_file_map().all_file_ids().map(|&file_id| (file_id, parse_file(fm, file_id))).collect() -} - -pub enum CompileResult { - Contract { contract: ContractArtifact, warnings: Vec }, - Program { program: ProgramArtifact, 
warnings: Vec }, -} - #[wasm_bindgen] -pub fn compile( +pub fn compile_program( entry_point: String, - contracts: Option, dependency_graph: Option, file_source_map: PathToFileSourceMap, -) -> Result { +) -> Result { console_error_panic_hook::set_once(); - - let dependency_graph: DependencyGraph = if let Some(dependency_graph) = dependency_graph { - ::into_serde(&JsValue::from(dependency_graph)) - .map_err(|err| err.to_string())? - } else { - DependencyGraph { root_dependencies: vec![], library_dependencies: HashMap::new() } - }; - - let fm = file_manager_with_source_map(file_source_map); - let parsed_files = parse_all(&fm); - let mut context = Context::new(fm, parsed_files); - - let path = Path::new(&entry_point); - let crate_id = prepare_crate(&mut context, path); - - process_dependency_graph(&mut context, dependency_graph); + let (crate_id, mut context) = prepare_context(entry_point, dependency_graph, file_source_map)?; let compile_options = CompileOptions::default(); - // For now we default to a bounded width of 3, though we can add it as a parameter - let expression_width = acvm::ExpressionWidth::Bounded { width: 3 }; + let expression_width = acvm::acir::circuit::ExpressionWidth::Bounded { width: 3 }; - if contracts.unwrap_or_default() { - let compiled_contract = compile_contract(&mut context, crate_id, &compile_options) + let compiled_program = + noirc_driver::compile_main(&mut context, crate_id, &compile_options, None) .map_err(|errs| { CompileError::with_file_diagnostics( - "Failed to compile contract", + "Failed to compile program", errs, &context.file_manager, ) })? .0; - let optimized_contract = - nargo::ops::transform_contract(compiled_contract, expression_width); + let optimized_program = nargo::ops::transform_program(compiled_program, expression_width); + let warnings = optimized_program.warnings.clone(); - let compile_output = generate_contract_artifact(optimized_contract); - Ok(JsCompileResult::new(compile_output)) - } else { - let compiled_program = compile_main(&mut context, crate_id, &compile_options, None) + Ok(JsCompileProgramResult::new(optimized_program.into(), warnings)) +} + +#[wasm_bindgen] +pub fn compile_contract( + entry_point: String, + dependency_graph: Option, + file_source_map: PathToFileSourceMap, +) -> Result { + console_error_panic_hook::set_once(); + let (crate_id, mut context) = prepare_context(entry_point, dependency_graph, file_source_map)?; + + let compile_options = CompileOptions::default(); + // For now we default to a bounded width of 3, though we can add it as a parameter + let expression_width = acvm::acir::circuit::ExpressionWidth::Bounded { width: 3 }; + + let compiled_contract = + noirc_driver::compile_contract(&mut context, crate_id, &compile_options) .map_err(|errs| { CompileError::with_file_diagnostics( - "Failed to compile program", + "Failed to compile contract", errs, &context.file_manager, ) })? 
.0; - let optimized_program = nargo::ops::transform_program(compiled_program, expression_width); + let optimized_contract = nargo::ops::transform_contract(compiled_contract, expression_width); - let compile_output = generate_program_artifact(optimized_program); - Ok(JsCompileResult::new(compile_output)) - } + let functions = + optimized_contract.functions.into_iter().map(ContractFunctionArtifact::from).collect(); + + let contract_artifact = ContractArtifact { + noir_version: String::from(NOIR_ARTIFACT_VERSION_STRING), + name: optimized_contract.name, + functions, + events: optimized_contract.events, + file_map: optimized_contract.file_map, + }; + + Ok(JsCompileContractResult::new(contract_artifact, optimized_contract.warnings)) +} + +fn prepare_context( + entry_point: String, + dependency_graph: Option, + file_source_map: PathToFileSourceMap, +) -> Result<(CrateId, Context<'static, 'static>), JsCompileError> { + let dependency_graph: DependencyGraph = if let Some(dependency_graph) = dependency_graph { + ::into_serde(&JsValue::from(dependency_graph)) + .map_err(|err| err.to_string())? + } else { + DependencyGraph { root_dependencies: vec![], library_dependencies: HashMap::new() } + }; + + let fm = file_manager_with_source_map(file_source_map); + let parsed_files = parse_all(&fm); + let mut context = Context::new(fm, parsed_files); + + let path = Path::new(&entry_point); + let crate_id = prepare_crate(&mut context, path); + + process_dependency_graph(&mut context, dependency_graph); + + Ok((crate_id, context)) } // Create a new FileManager with the given source map @@ -270,35 +306,15 @@ fn add_noir_lib(context: &mut Context, library_name: &CrateName) -> CrateId { prepare_dependency(context, &path_to_lib) } -pub(crate) fn generate_program_artifact(program: CompiledProgram) -> CompileResult { - let warnings = program.warnings.clone(); - CompileResult::Program { program: program.into(), warnings } -} - -pub(crate) fn generate_contract_artifact(contract: CompiledContract) -> CompileResult { - let functions = contract.functions.into_iter().map(ContractFunctionArtifact::from).collect(); - - let contract_artifact = ContractArtifact { - noir_version: String::from(NOIR_ARTIFACT_VERSION_STRING), - name: contract.name, - functions, - events: contract.events, - file_map: contract.file_map, - }; - - CompileResult::Contract { contract: contract_artifact, warnings: contract.warnings } -} - #[cfg(test)] mod test { + use nargo::parse_all; use noirc_driver::prepare_crate; use noirc_frontend::{graph::CrateName, hir::Context}; use crate::compile::PathToFileSourceMap; - use super::{ - file_manager_with_source_map, parse_all, process_dependency_graph, DependencyGraph, - }; + use super::{file_manager_with_source_map, process_dependency_graph, DependencyGraph}; use std::{collections::HashMap, path::Path}; fn setup_test_context(source_map: PathToFileSourceMap) -> Context<'static, 'static> { diff --git a/compiler/wasm/src/compile_new.rs b/compiler/wasm/src/compile_new.rs index 4616004ae2b..d6b382f669f 100644 --- a/compiler/wasm/src/compile_new.rs +++ b/compiler/wasm/src/compile_new.rs @@ -1,10 +1,13 @@ use crate::compile::{ - file_manager_with_source_map, generate_contract_artifact, generate_program_artifact, parse_all, - JsCompileResult, PathToFileSourceMap, + file_manager_with_source_map, JsCompileContractResult, JsCompileProgramResult, + PathToFileSourceMap, }; use crate::errors::{CompileError, JsCompileError}; +use nargo::artifacts::contract::{ContractArtifact, ContractFunctionArtifact}; +use nargo::parse_all; 
use noirc_driver::{ add_dep, compile_contract, compile_main, prepare_crate, prepare_dependency, CompileOptions, + NOIR_ARTIFACT_VERSION_STRING, }; use noirc_frontend::{ graph::{CrateId, CrateName}, @@ -92,9 +95,9 @@ impl CompilerContext { pub fn compile_program( mut self, program_width: usize, - ) -> Result { + ) -> Result { let compile_options = CompileOptions::default(); - let np_language = acvm::ExpressionWidth::Bounded { width: program_width }; + let np_language = acvm::acir::circuit::ExpressionWidth::Bounded { width: program_width }; let root_crate_id = *self.context.root_crate_id(); @@ -110,17 +113,17 @@ impl CompilerContext { .0; let optimized_program = nargo::ops::transform_program(compiled_program, np_language); + let warnings = optimized_program.warnings.clone(); - let compile_output = generate_program_artifact(optimized_program); - Ok(JsCompileResult::new(compile_output)) + Ok(JsCompileProgramResult::new(optimized_program.into(), warnings)) } pub fn compile_contract( mut self, program_width: usize, - ) -> Result { + ) -> Result { let compile_options = CompileOptions::default(); - let np_language = acvm::ExpressionWidth::Bounded { width: program_width }; + let np_language = acvm::acir::circuit::ExpressionWidth::Bounded { width: program_width }; let root_crate_id = *self.context.root_crate_id(); let compiled_contract = @@ -136,24 +139,64 @@ impl CompilerContext { let optimized_contract = nargo::ops::transform_contract(compiled_contract, np_language); - let compile_output = generate_contract_artifact(optimized_contract); - Ok(JsCompileResult::new(compile_output)) + let functions = + optimized_contract.functions.into_iter().map(ContractFunctionArtifact::from).collect(); + + let contract_artifact = ContractArtifact { + noir_version: String::from(NOIR_ARTIFACT_VERSION_STRING), + name: optimized_contract.name, + functions, + events: optimized_contract.events, + file_map: optimized_contract.file_map, + }; + + Ok(JsCompileContractResult::new(contract_artifact, optimized_contract.warnings)) } } /// This is a method that exposes the same API as `compile` /// But uses the Context based APi internally #[wasm_bindgen] -pub fn compile_( +pub fn compile_program_( entry_point: String, - contracts: Option, dependency_graph: Option, file_source_map: PathToFileSourceMap, -) -> Result { - use std::collections::HashMap; +) -> Result { + console_error_panic_hook::set_once(); + + let compiler_context = + prepare_compiler_context(entry_point, dependency_graph, file_source_map)?; + let program_width = 3; + compiler_context.compile_program(program_width) +} + +/// This is a method that exposes the same API as `compile` +/// But uses the Context based APi internally +#[wasm_bindgen] +pub fn compile_contract_( + entry_point: String, + dependency_graph: Option, + file_source_map: PathToFileSourceMap, +) -> Result { console_error_panic_hook::set_once(); + let compiler_context = + prepare_compiler_context(entry_point, dependency_graph, file_source_map)?; + let program_width = 3; + + compiler_context.compile_contract(program_width) +} + +/// This is a method that exposes the same API as `prepare_context` +/// But uses the Context based API internally +fn prepare_compiler_context( + entry_point: String, + dependency_graph: Option, + file_source_map: PathToFileSourceMap, +) -> Result { + use std::collections::HashMap; + let dependency_graph: crate::compile::DependencyGraph = if let Some(dependency_graph) = dependency_graph { ::into_serde( @@ -218,22 +261,16 @@ pub fn compile_( } } - let is_contract = 
contracts.unwrap_or(false); - let program_width = 3; - - if is_contract { - compiler_context.compile_contract(program_width) - } else { - compiler_context.compile_program(program_width) - } + Ok(compiler_context) } #[cfg(test)] mod test { + use nargo::parse_all; use noirc_driver::prepare_crate; use noirc_frontend::hir::Context; - use crate::compile::{file_manager_with_source_map, parse_all, PathToFileSourceMap}; + use crate::compile::{file_manager_with_source_map, PathToFileSourceMap}; use std::path::Path; diff --git a/compiler/wasm/src/index.cts b/compiler/wasm/src/index.cts index 14687e615df..234bfa7280c 100644 --- a/compiler/wasm/src/index.cts +++ b/compiler/wasm/src/index.cts @@ -2,21 +2,101 @@ import { FileManager } from './noir/file-manager/file-manager'; import { createNodejsFileManager } from './noir/file-manager/nodejs-file-manager'; import { NoirWasmCompiler } from './noir/noir-wasm-compiler'; import { LogData, LogFn } from './utils'; -import { CompilationResult } from './types/noir_artifact'; +import { ContractCompilationArtifacts, ProgramCompilationArtifacts } from './types/noir_artifact'; import { inflateDebugSymbols } from './noir/debug'; -async function compile( +/** + * Compiles a Noir project + * + * @param fileManager - The file manager to use + * @param projectPath - The path to the project inside the file manager. Defaults to the root of the file manager + * @param logFn - A logging function. If not provided, console.log will be used + * @param debugLogFn - A debug logging function. If not provided, logFn will be used + * + * @example + * ```typescript + * // Node.js + * + * import { compile_program, createFileManager } from '@noir-lang/noir_wasm'; + * + * const fm = createFileManager(myProjectPath); + * const myCompiledCode = await compile_program(fm); + * ``` + * + * ```typescript + * // Browser + * + * import { compile_program, createFileManager } from '@noir-lang/noir_wasm'; + * + * const fm = createFileManager('/'); + * for (const path of files) { + * await fm.writeFile(path, await getFileAsStream(path)); + * } + * const myCompiledCode = await compile_program(fm); + * ``` + */ +async function compile_program( fileManager: FileManager, projectPath?: string, logFn?: LogFn, debugLogFn?: LogFn, -): Promise { +): Promise { + const compiler = await setup_compiler(fileManager, projectPath, logFn, debugLogFn); + return await compiler.compile_program(); +} + +/** + * Compiles a Noir project + * + * @param fileManager - The file manager to use + * @param projectPath - The path to the project inside the file manager. Defaults to the root of the file manager + * @param logFn - A logging function. If not provided, console.log will be used + * @param debugLogFn - A debug logging function. 
If not provided, logFn will be used + * + * @example + * ```typescript + * // Node.js + * + * import { compile_contract, createFileManager } from '@noir-lang/noir_wasm'; + * + * const fm = createFileManager(myProjectPath); + * const myCompiledCode = await compile_contract(fm); + * ``` + * + * ```typescript + * // Browser + * + * import { compile_contract, createFileManager } from '@noir-lang/noir_wasm'; + * + * const fm = createFileManager('/'); + * for (const path of files) { + * await fm.writeFile(path, await getFileAsStream(path)); + * } + * const myCompiledCode = await compile_contract(fm); + * ``` + */ +async function compile_contract( + fileManager: FileManager, + projectPath?: string, + logFn?: LogFn, + debugLogFn?: LogFn, +): Promise { + const compiler = await setup_compiler(fileManager, projectPath, logFn, debugLogFn); + return await compiler.compile_contract(); +} + +async function setup_compiler( + fileManager: FileManager, + projectPath?: string, + logFn?: LogFn, + debugLogFn?: LogFn, +): Promise { if (logFn && !debugLogFn) { debugLogFn = logFn; } const cjs = await require('../build/cjs'); - const compiler = await NoirWasmCompiler.new( + return await NoirWasmCompiler.new( fileManager, projectPath ?? fileManager.getDataDir(), cjs, @@ -42,9 +122,16 @@ async function compile( }, }, ); - return await compiler.compile(); } const createFileManager = createNodejsFileManager; -export { compile, createFileManager, inflateDebugSymbols, CompilationResult }; +export { + compile_program as compile, + compile_program, + compile_contract, + createFileManager, + inflateDebugSymbols, + ProgramCompilationArtifacts, + ContractCompilationArtifacts, +}; diff --git a/compiler/wasm/src/index.mts b/compiler/wasm/src/index.mts index 8774a7857ef..326a7337117 100644 --- a/compiler/wasm/src/index.mts +++ b/compiler/wasm/src/index.mts @@ -2,15 +2,95 @@ import { FileManager } from './noir/file-manager/file-manager'; import { createNodejsFileManager } from './noir/file-manager/nodejs-file-manager'; import { NoirWasmCompiler } from './noir/noir-wasm-compiler'; import { LogData, LogFn } from './utils'; -import { CompilationResult } from './types/noir_artifact'; +import { ContractCompilationArtifacts, ProgramCompilationArtifacts } from './types/noir_artifact'; import { inflateDebugSymbols } from './noir/debug'; -async function compile( +/** + * Compiles a Noir project + * + * @param fileManager - The file manager to use + * @param projectPath - The path to the project inside the file manager. Defaults to the root of the file manager + * @param logFn - A logging function. If not provided, console.log will be used + * @param debugLogFn - A debug logging function. 
If not provided, logFn will be used + * + * @example + * ```typescript + * // Node.js + * + * import { compile_program, createFileManager } from '@noir-lang/noir_wasm'; + * + * const fm = createFileManager(myProjectPath); + * const myCompiledCode = await compile_program(fm); + * ``` + * + * ```typescript + * // Browser + * + * import { compile_program, createFileManager } from '@noir-lang/noir_wasm'; + * + * const fm = createFileManager('/'); + * for (const path of files) { + * await fm.writeFile(path, await getFileAsStream(path)); + * } + * const myCompiledCode = await compile_program(fm); + * ``` + */ +async function compile_program( fileManager: FileManager, projectPath?: string, logFn?: LogFn, debugLogFn?: LogFn, -): Promise { +): Promise { + const compiler = await setup_compiler(fileManager, projectPath, logFn, debugLogFn); + return await compiler.compile_program(); +} + +/** + * Compiles a Noir project + * + * @param fileManager - The file manager to use + * @param projectPath - The path to the project inside the file manager. Defaults to the root of the file manager + * @param logFn - A logging function. If not provided, console.log will be used + * @param debugLogFn - A debug logging function. If not provided, logFn will be used + * + * @example + * ```typescript + * // Node.js + * + * import { compile_contract, createFileManager } from '@noir-lang/noir_wasm'; + * + * const fm = createFileManager(myProjectPath); + * const myCompiledCode = await compile_contract(fm); + * ``` + * + * ```typescript + * // Browser + * + * import { compile_contract, createFileManager } from '@noir-lang/noir_wasm'; + * + * const fm = createFileManager('/'); + * for (const path of files) { + * await fm.writeFile(path, await getFileAsStream(path)); + * } + * const myCompiledCode = await compile_contract(fm); + * ``` + */ +async function compile_contract( + fileManager: FileManager, + projectPath?: string, + logFn?: LogFn, + debugLogFn?: LogFn, +): Promise { + const compiler = await setup_compiler(fileManager, projectPath, logFn, debugLogFn); + return await compiler.compile_contract(); +} + +async function setup_compiler( + fileManager: FileManager, + projectPath?: string, + logFn?: LogFn, + debugLogFn?: LogFn, +): Promise { if (logFn && !debugLogFn) { debugLogFn = logFn; } @@ -18,7 +98,7 @@ async function compile( const esm = await import(/* webpackMode: "eager" */ '../build/esm'); await esm.default(); - const compiler = await NoirWasmCompiler.new( + return await NoirWasmCompiler.new( fileManager, projectPath ?? 
fileManager.getDataDir(), esm, @@ -44,9 +124,16 @@ async function compile( }, }, ); - return await compiler.compile(); } const createFileManager = createNodejsFileManager; -export { compile, createFileManager, inflateDebugSymbols, CompilationResult }; +export { + compile_program as compile, + compile_program, + compile_contract, + createFileManager, + inflateDebugSymbols, + ProgramCompilationArtifacts, + ContractCompilationArtifacts, +}; diff --git a/compiler/wasm/src/lib.rs b/compiler/wasm/src/lib.rs index 6d737a0ea6d..6753faf2009 100644 --- a/compiler/wasm/src/lib.rs +++ b/compiler/wasm/src/lib.rs @@ -18,10 +18,10 @@ mod compile; mod compile_new; mod errors; -pub use compile::compile; +pub use compile::{compile_contract, compile_program}; // Expose the new Context-Centric API -pub use compile_new::{compile_, CompilerContext, CrateIDWrapper}; +pub use compile_new::{compile_contract_, compile_program_, CompilerContext, CrateIDWrapper}; use wasm_bindgen::{prelude::wasm_bindgen, JsValue}; #[derive(Serialize, Deserialize)] @@ -32,12 +32,12 @@ pub struct BuildInfo { } #[wasm_bindgen] -pub fn init_log_level(filter: String) { +pub fn init_log_level(level: String) { // Set the static variable from Rust use std::sync::Once; - let filter: EnvFilter = - filter.parse().expect("Could not parse log filter while initializing logger"); + let level_filter: EnvFilter = + level.parse().expect("Could not parse log filter while initializing logger"); static SET_HOOK: Once = Once::new(); SET_HOOK.call_once(|| { @@ -46,7 +46,7 @@ pub fn init_log_level(filter: String) { .without_time() .with_writer(MakeWebConsoleWriter::new()); - tracing_subscriber::registry().with(fmt_layer.with_filter(filter)).init(); + tracing_subscriber::registry().with(fmt_layer.with_filter(level_filter)).init(); }); } diff --git a/compiler/wasm/src/noir/debug.ts b/compiler/wasm/src/noir/debug.ts index 7a65f4b68c2..1a4ccfe95ec 100644 --- a/compiler/wasm/src/noir/debug.ts +++ b/compiler/wasm/src/noir/debug.ts @@ -1,6 +1,9 @@ import { inflate } from 'pako'; -/** Decompresses and decodes the debug symbols */ +/** + * Decompresses and decodes the debug symbols + * @param debugSymbols - The base64 encoded debug symbols + */ export function inflateDebugSymbols(debugSymbols: string) { return JSON.parse(inflate(Buffer.from(debugSymbols, 'base64'), { to: 'string', raw: true })); } diff --git a/compiler/wasm/src/noir/file-manager/nodejs-file-manager.ts b/compiler/wasm/src/noir/file-manager/nodejs-file-manager.ts index 1a8250f49cc..195eea8a70d 100644 --- a/compiler/wasm/src/noir/file-manager/nodejs-file-manager.ts +++ b/compiler/wasm/src/noir/file-manager/nodejs-file-manager.ts @@ -18,8 +18,9 @@ export async function readdirRecursive(dir: string): Promise { } /** - * Creates a new FileManager instance based on nodejs fs - * @param dataDir - where to store files + * Creates a new FileManager instance based on fs in node and memfs in the browser (via webpack alias) + * + * @param dataDir - root of the file system */ export function createNodejsFileManager(dataDir: string): FileManager { return new FileManager( diff --git a/compiler/wasm/src/noir/noir-wasm-compiler.ts b/compiler/wasm/src/noir/noir-wasm-compiler.ts index 2a0af5d8fee..1ec3af1fd65 100644 --- a/compiler/wasm/src/noir/noir-wasm-compiler.ts +++ b/compiler/wasm/src/noir/noir-wasm-compiler.ts @@ -6,7 +6,7 @@ import { LocalDependencyResolver } from './dependencies/local-dependency-resolve import { FileManager } from './file-manager/file-manager'; import { Package } from './package'; import { 
LogFn } from '../utils'; -import { CompilationResult } from '../types/noir_artifact'; +import { ContractCompilationArtifacts, ProgramCompilationArtifacts } from '../types/noir_artifact'; /** Compilation options */ export type NoirWasmCompileOptions = { @@ -84,21 +84,64 @@ export class NoirWasmCompiler { /** * Compile EntryPoint */ + public async compile_program(): Promise { + console.log(`Compiling at ${this.#package.getEntryPointPath()}`); + + if (this.#package.getType() !== 'bin') { + throw new Error(`Expected to find package type "bin" but found ${this.#package.getType()}`); + } + await this.#dependencyManager.resolveDependencies(); + this.#debugLog(`Dependencies: ${this.#dependencyManager.getPackageNames().join(', ')}`); + + try { + const entrypoint = this.#package.getEntryPointPath(); + const deps = { + /* eslint-disable camelcase */ + root_dependencies: this.#dependencyManager.getEntrypointDependencies(), + library_dependencies: this.#dependencyManager.getLibraryDependencies(), + /* eslint-enable camelcase */ + }; + const packageSources = await this.#package.getSources(this.#fm); + const librarySources = ( + await Promise.all( + this.#dependencyManager + .getLibraries() + .map(async ([alias, library]) => await library.package.getSources(this.#fm, alias)), + ) + ).flat(); + [...packageSources, ...librarySources].forEach((sourceFile) => { + this.#debugLog(`Adding source ${sourceFile.path}`); + this.#sourceMap.add_source_code(sourceFile.path, sourceFile.source); + }); + const result = this.#wasmCompiler.compile_program(entrypoint, deps, this.#sourceMap); + + return result; + } catch (err) { + if (err instanceof Error && err.name === 'CompileError') { + const logs = await this.#processCompileError(err); + for (const log of logs) { + this.#log(log); + } + throw new Error(logs.join('\n')); + } + + throw err; + } + } + /** * Compile EntryPoint */ - public async compile(): Promise { + public async compile_contract(): Promise { console.log(`Compiling at ${this.#package.getEntryPointPath()}`); - if (!(this.#package.getType() === 'contract' || this.#package.getType() === 'bin')) { - throw new Error(`Only supports compiling "contract" and "bin" package types (${this.#package.getType()})`); + if (this.#package.getType() !== 'contract') { + throw new Error(`Expected to find package type "contract" but found ${this.#package.getType()}`); } await this.#dependencyManager.resolveDependencies(); this.#debugLog(`Dependencies: ${this.#dependencyManager.getPackageNames().join(', ')}`); try { - const isContract: boolean = this.#package.getType() === 'contract'; - const entrypoint = this.#package.getEntryPointPath(); const deps = { /* eslint-disable camelcase */ @@ -118,11 +161,7 @@ export class NoirWasmCompiler { this.#debugLog(`Adding source ${sourceFile.path}`); this.#sourceMap.add_source_code(sourceFile.path, sourceFile.source); }); - const result = this.#wasmCompiler.compile(entrypoint, isContract, deps, this.#sourceMap); - - if ((isContract && !('contract' in result)) || (!isContract && !('program' in result))) { - throw new Error('Invalid compilation result'); - } + const result = this.#wasmCompiler.compile_contract(entrypoint, deps, this.#sourceMap); return result; } catch (err) { diff --git a/compiler/wasm/src/noir/package.ts b/compiler/wasm/src/noir/package.ts index a2496a03b3a..81178e6ae96 100644 --- a/compiler/wasm/src/noir/package.ts +++ b/compiler/wasm/src/noir/package.ts @@ -91,7 +91,7 @@ export class Package { * Gets this package's dependencies. 
*/ public getDependencies(): Record { - return this.#config.dependencies; + return this.#config.dependencies ?? {}; } /** diff --git a/compiler/wasm/src/types/noir_artifact.ts b/compiler/wasm/src/types/noir_artifact.ts index 715877e335f..832a6ed9bf9 100644 --- a/compiler/wasm/src/types/noir_artifact.ts +++ b/compiler/wasm/src/types/noir_artifact.ts @@ -1,4 +1,4 @@ -import { Abi, AbiType } from '@noir-lang/noirc_abi'; +import { Abi, AbiType } from '@noir-lang/types'; /** * A named type. @@ -73,6 +73,8 @@ export interface ContractArtifact { * The compilation result of an Noir contract. */ export interface ProgramArtifact { + /** Version of noir used for the build. */ + noir_version: string; /** The hash of the circuit. */ hash?: number; /** * The ABI of the function. */ @@ -178,22 +180,3 @@ export interface ProgramCompilationArtifacts { /** Compilation warnings. */ warnings: Warning[]; } - -/** - * output of Noir Wasm compilation, can be for a contract or lib/binary - */ -export type CompilationResult = ContractCompilationArtifacts | ProgramCompilationArtifacts; - -/** - * Check if it has Contract unique property - */ -export function isContractCompilationArtifacts(artifact: CompilationResult): artifact is ContractCompilationArtifacts { - return (artifact as ContractCompilationArtifacts).contract !== undefined; -} - -/** - * Check if it has Contract unique property - */ -export function isProgramCompilationArtifacts(artifact: CompilationResult): artifact is ProgramCompilationArtifacts { - return (artifact as ProgramCompilationArtifacts).program !== undefined; -} diff --git a/compiler/wasm/src/types/noir_package_config.ts b/compiler/wasm/src/types/noir_package_config.ts index 5f07c380cf3..0203763039a 100644 --- a/compiler/wasm/src/types/noir_package_config.ts +++ b/compiler/wasm/src/types/noir_package_config.ts @@ -20,7 +20,7 @@ type NoirPackageConfigSchema = { backend?: string; license?: string; }; - dependencies: Record; + dependencies?: Record; }; /** diff --git a/compiler/wasm/test/compiler/browser/compile.test.ts b/compiler/wasm/test/compiler/browser/compile.test.ts new file mode 100644 index 00000000000..7d4b3da55aa --- /dev/null +++ b/compiler/wasm/test/compiler/browser/compile.test.ts @@ -0,0 +1,79 @@ +/* eslint-disable @typescript-eslint/ban-ts-comment */ +import { getPaths } from '../../shared'; +import { expect } from '@esm-bundle/chai'; +import { compile_program, compile_contract, createFileManager } from '@noir-lang/noir_wasm'; +import { ContractArtifact, ProgramArtifact } from '../../../src/types/noir_artifact'; +import { shouldCompileContractIdentically, shouldCompileProgramIdentically } from '../shared/compile.test'; + +const paths = getPaths('.'); + +async function getFile(path: string) { + // @ts-ignore + const basePath = new URL('./../../', import.meta.url).toString().replace(/\/$/g, ''); + const url = `${basePath}${path.replace('.', '')}`; + const response = await fetch(url); + return response; +} + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +async function getPrecompiledSource(path: string): Promise { + const response = await getFile(path); + const compiledData = await response.text(); + return JSON.parse(compiledData); +} + +describe('noir-compiler/browser', () => { + shouldCompileProgramIdentically( + async () => { + const { simpleScriptExpectedArtifact } = paths; + const fm = createFileManager('/'); + const files = Object.values(paths).filter((fileOrDir) => /^\.?\/.*\..*$/.test(fileOrDir)); + for (const path of files) { + console.log(path); + await 
fm.writeFile(path, (await getFile(path)).body as ReadableStream); + } + const nargoArtifact = (await getPrecompiledSource(simpleScriptExpectedArtifact)) as ProgramArtifact; + const noirWasmArtifact = await compile_program(fm, '/fixtures/simple'); + + return { nargoArtifact, noirWasmArtifact }; + }, + expect, + 60 * 20e3, + ); + + shouldCompileProgramIdentically( + async () => { + const { depsScriptExpectedArtifact } = paths; + const fm = createFileManager('/'); + const files = Object.values(paths).filter((fileOrDir) => /^\.?\/.*\..*$/.test(fileOrDir)); + for (const path of files) { + console.log(path); + await fm.writeFile(path, (await getFile(path)).body as ReadableStream); + } + const nargoArtifact = (await getPrecompiledSource(depsScriptExpectedArtifact)) as ProgramArtifact; + const noirWasmArtifact = await compile_program(fm, '/fixtures/with-deps'); + + return { nargoArtifact, noirWasmArtifact }; + }, + expect, + 60 * 20e3, + ); + + shouldCompileContractIdentically( + async () => { + const { contractExpectedArtifact } = paths; + const fm = createFileManager('/'); + const files = Object.values(paths).filter((fileOrDir) => /^\.?\/.*\..*$/.test(fileOrDir)); + for (const path of files) { + console.log(path); + await fm.writeFile(path, (await getFile(path)).body as ReadableStream); + } + const nargoArtifact = (await getPrecompiledSource(contractExpectedArtifact)) as ContractArtifact; + const noirWasmArtifact = await compile_contract(fm, '/fixtures/noir-contract'); + + return { nargoArtifact, noirWasmArtifact }; + }, + expect, + 60 * 20e3, + ); +}); diff --git a/compiler/wasm/test/compiler/browser/compile_with_deps.test.ts b/compiler/wasm/test/compiler/browser/compile_with_deps.test.ts deleted file mode 100644 index 0d1e22e288f..00000000000 --- a/compiler/wasm/test/compiler/browser/compile_with_deps.test.ts +++ /dev/null @@ -1,43 +0,0 @@ -/* eslint-disable @typescript-eslint/ban-ts-comment */ -import { getPaths } from '../../shared'; -import { expect } from '@esm-bundle/chai'; -import { compile, createFileManager } from '@noir-lang/noir_wasm'; -import { ContractArtifact } from '../../../src/types/noir_artifact'; -import { shouldCompileIdentically } from '../shared/compile_with_deps.test'; - -const paths = getPaths('.'); - -async function getFile(path: string) { - // @ts-ignore - const basePath = new URL('./../../', import.meta.url).toString().replace(/\/$/g, ''); - const url = `${basePath}${path.replace('.', '')}`; - const response = await fetch(url); - return response; -} - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -async function getPrecompiledSource(path: string): Promise { - const response = await getFile(path); - const compiledData = await response.text(); - return JSON.parse(compiledData); -} - -describe('noir-compiler/browser', () => { - shouldCompileIdentically( - async () => { - const { contractExpectedArtifact } = paths; - const fm = createFileManager('/'); - const files = Object.values(paths).filter((fileOrDir) => /^\.?\/.*\..*$/.test(fileOrDir)); - for (const path of files) { - console.log(path); - await fm.writeFile(path, (await getFile(path)).body as ReadableStream); - } - const nargoArtifact = (await getPrecompiledSource(contractExpectedArtifact)) as ContractArtifact; - const noirWasmArtifact = await compile(fm, '/fixtures/noir-contract'); - - return { nargoArtifact, noirWasmArtifact }; - }, - expect, - 60 * 20e3, - ); -}); diff --git a/compiler/wasm/test/compiler/node/compile.test.ts b/compiler/wasm/test/compiler/node/compile.test.ts new file mode 100644 
index 00000000000..811dc95ce16 --- /dev/null +++ b/compiler/wasm/test/compiler/node/compile.test.ts @@ -0,0 +1,39 @@ +import { join, resolve } from 'path'; +import { getPaths } from '../../shared'; + +import { expect } from 'chai'; +import { compile_program, compile_contract, createFileManager } from '@noir-lang/noir_wasm'; +import { readFile } from 'fs/promises'; +import { ContractArtifact, ProgramArtifact } from '../../../src/types/noir_artifact'; +import { shouldCompileContractIdentically, shouldCompileProgramIdentically } from '../shared/compile.test'; + +const basePath = resolve(join(__dirname, '../../')); + +describe('noir-compiler/node', () => { + shouldCompileProgramIdentically(async () => { + const { simpleScriptProjectPath, simpleScriptExpectedArtifact } = getPaths(basePath); + + const fm = createFileManager(simpleScriptProjectPath); + const nargoArtifact = JSON.parse((await readFile(simpleScriptExpectedArtifact)).toString()) as ProgramArtifact; + const noirWasmArtifact = await compile_program(fm); + return { nargoArtifact, noirWasmArtifact }; + }, expect); + + shouldCompileProgramIdentically(async () => { + const { depsScriptProjectPath, depsScriptExpectedArtifact } = getPaths(basePath); + + const fm = createFileManager(depsScriptProjectPath); + const nargoArtifact = JSON.parse((await readFile(depsScriptExpectedArtifact)).toString()) as ProgramArtifact; + const noirWasmArtifact = await compile_program(fm); + return { nargoArtifact, noirWasmArtifact }; + }, expect); + + shouldCompileContractIdentically(async () => { + const { contractProjectPath, contractExpectedArtifact } = getPaths(basePath); + + const fm = createFileManager(contractProjectPath); + const nargoArtifact = JSON.parse((await readFile(contractExpectedArtifact)).toString()) as ContractArtifact; + const noirWasmArtifact = await compile_contract(fm); + return { nargoArtifact, noirWasmArtifact }; + }, expect); +}); diff --git a/compiler/wasm/test/compiler/node/compile_with_deps.test.ts b/compiler/wasm/test/compiler/node/compile_with_deps.test.ts deleted file mode 100644 index 2a402dc9d02..00000000000 --- a/compiler/wasm/test/compiler/node/compile_with_deps.test.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { join, resolve } from 'path'; -import { getPaths } from '../../shared'; - -import { expect } from 'chai'; -import { compile, createFileManager } from '@noir-lang/noir_wasm'; -import { readFile } from 'fs/promises'; -import { ContractArtifact } from '../../../src/types/noir_artifact'; -import { shouldCompileIdentically } from '../shared/compile_with_deps.test'; - -const basePath = resolve(join(__dirname, '../../')); -const { contractProjectPath, contractExpectedArtifact } = getPaths(basePath); - -describe('noir-compiler/node', () => { - shouldCompileIdentically(async () => { - const fm = createFileManager(contractProjectPath); - const nargoArtifact = JSON.parse((await readFile(contractExpectedArtifact)).toString()) as ContractArtifact; - const noirWasmArtifact = await compile(fm); - return { nargoArtifact, noirWasmArtifact }; - }, expect); -}); diff --git a/compiler/wasm/test/compiler/shared/compile_with_deps.test.ts b/compiler/wasm/test/compiler/shared/compile.test.ts similarity index 51% rename from compiler/wasm/test/compiler/shared/compile_with_deps.test.ts rename to compiler/wasm/test/compiler/shared/compile.test.ts index 0960cba0665..52cef14968b 100644 --- a/compiler/wasm/test/compiler/shared/compile_with_deps.test.ts +++ b/compiler/wasm/test/compiler/shared/compile.test.ts @@ -1,4 +1,4 @@ -import { 
CompilationResult, inflateDebugSymbols } from '@noir-lang/noir_wasm'; +import { inflateDebugSymbols } from '@noir-lang/noir_wasm'; import { type expect as Expect } from 'chai'; import { ContractArtifact, @@ -6,10 +6,12 @@ import { DebugFileMap, DebugInfo, NoirFunctionEntry, + ProgramArtifact, + ProgramCompilationArtifacts, } from '../../../src/types/noir_artifact'; -export function shouldCompileIdentically( - compileFn: () => Promise<{ nargoArtifact: ContractArtifact; noirWasmArtifact: CompilationResult }>, +export function shouldCompileProgramIdentically( + compileFn: () => Promise<{ nargoArtifact: ProgramArtifact; noirWasmArtifact: ProgramCompilationArtifacts }>, expect: typeof Expect, timeout = 5000, ) { @@ -18,13 +20,49 @@ export function shouldCompileIdentically( const { nargoArtifact, noirWasmArtifact } = await compileFn(); // Prepare nargo artifact - const [nargoDebugInfos, nargoFileMap] = deleteDebugMetadata(nargoArtifact); + const [_nargoDebugInfos, nargoFileMap] = deleteProgramDebugMetadata(nargoArtifact); normalizeVersion(nargoArtifact); // Prepare noir-wasm artifact - const noirWasmContract = (noirWasmArtifact as ContractCompilationArtifacts).contract; + const noirWasmProgram = noirWasmArtifact.program; + expect(noirWasmProgram).not.to.be.undefined; + const [_noirWasmDebugInfos, norWasmFileMap] = deleteProgramDebugMetadata(noirWasmProgram); + normalizeVersion(noirWasmProgram); + + // We first compare both contracts without considering debug info + delete (noirWasmProgram as Partial).hash; + delete (nargoArtifact as Partial).hash; + expect(nargoArtifact).to.deep.eq(noirWasmProgram); + + // Compare the file maps, ignoring keys, since those depend in the order in which files are visited, + // which may change depending on the file manager implementation. Also ignores paths, since the base + // path is reported differently between nargo and noir-wasm. + expect(getSources(nargoFileMap)).to.have.members(getSources(norWasmFileMap)); + + // Compare the debug symbol information, ignoring the actual ids used for file identifiers. + // Debug symbol info looks like the following, what we need is to ignore the 'file' identifiers + // {"locations":{"0":[{"span":{"start":141,"end":156},"file":39},{"span":{"start":38,"end":76},"file":38},{"span":{"start":824,"end":862},"file":23}]}} + // expect(nargoDebugInfos).to.deep.eq(noirWasmDebugInfos); + }).timeout(timeout); +} + +export function shouldCompileContractIdentically( + compileFn: () => Promise<{ nargoArtifact: ContractArtifact; noirWasmArtifact: ContractCompilationArtifacts }>, + expect: typeof Expect, + timeout = 5000, +) { + it('both nargo and noir_wasm should compile identically', async () => { + // Compile! 
+ const { nargoArtifact, noirWasmArtifact } = await compileFn(); + + // Prepare nargo artifact + const [nargoDebugInfos, nargoFileMap] = deleteContractDebugMetadata(nargoArtifact); + normalizeVersion(nargoArtifact); + + // Prepare noir-wasm artifact + const noirWasmContract = noirWasmArtifact.contract; expect(noirWasmContract).not.to.be.undefined; - const [noirWasmDebugInfos, norWasmFileMap] = deleteDebugMetadata(noirWasmContract); + const [noirWasmDebugInfos, norWasmFileMap] = deleteContractDebugMetadata(noirWasmContract); normalizeVersion(noirWasmContract); // We first compare both contracts without considering debug info @@ -43,7 +81,7 @@ export function shouldCompileIdentically( } /** Remove commit identifier from version, which may not match depending on cached nargo and noir-wasm */ -function normalizeVersion(contract: ContractArtifact) { +function normalizeVersion(contract: ProgramArtifact | ContractArtifact) { contract.noir_version = contract.noir_version.replace(/\+.+$/, ''); } @@ -57,8 +95,18 @@ function extractDebugInfos(fns: NoirFunctionEntry[]) { }); } +/** Deletes all debug info from a program and returns it. */ +function deleteProgramDebugMetadata(program: ProgramArtifact) { + const debugSymbols = inflateDebugSymbols(program.debug_symbols); + const fileMap = program.file_map; + + delete (program as Partial).debug_symbols; + delete (program as Partial).file_map; + return [debugSymbols, fileMap]; +} + /** Deletes all debug info from a contract and returns it. */ -function deleteDebugMetadata(contract: ContractArtifact) { +function deleteContractDebugMetadata(contract: ContractArtifact) { contract.functions.sort((a, b) => a.name.localeCompare(b.name)); const fileMap = contract.file_map; delete (contract as Partial).file_map; diff --git a/compiler/wasm/test/shared.ts b/compiler/wasm/test/shared.ts index 9181919ff39..9f4d417a614 100644 --- a/compiler/wasm/test/shared.ts +++ b/compiler/wasm/test/shared.ts @@ -1,14 +1,23 @@ export function getPaths(basePath: string) { const fixtures = `${basePath}/fixtures`; - const simpleScriptSourcePath = `${fixtures}/simple/src/main.nr`; - const simpleScriptExpectedArtifact = `${fixtures}/simple/target/noir_wasm_testing.json`; + const simpleScriptProjectPath = `${fixtures}/simple`; + const simpleScriptSourcePath = `${simpleScriptProjectPath}/src/main.nr`; + const simpleScriptTOMLPath = `${simpleScriptProjectPath}/Nargo.toml`; + const simpleScriptExpectedArtifact = `${simpleScriptProjectPath}/target/noir_wasm_testing.json`; - const depsScriptSourcePath = `${fixtures}/with-deps/src/main.nr`; - const depsScriptExpectedArtifact = `${fixtures}/with-deps/target/noir_wasm_testing.json`; + const depsScriptProjectPath = `${fixtures}/with-deps`; + const depsScriptSourcePath = `${depsScriptProjectPath}/src/main.nr`; + const depsScriptTOMLPath = `${depsScriptProjectPath}/Nargo.toml`; + const depsScriptExpectedArtifact = `${depsScriptProjectPath}/target/noir_wasm_testing.json`; - const libASourcePath = `${fixtures}/deps/lib-a/src/lib.nr`; - const libBSourcePath = `${fixtures}/deps/lib-b/src/lib.nr`; + const libAProjectPath = `${fixtures}/deps/lib-a`; + const libASourcePath = `${libAProjectPath}/src/lib.nr`; + const libATOMLPath = `${libAProjectPath}/Nargo.toml`; + + const libBProjectPath = `${fixtures}/deps/lib-b`; + const libBSourcePath = `${libBProjectPath}/src/lib.nr`; + const libBTOMLPath = `${libBProjectPath}/Nargo.toml`; const contractProjectPath = `${fixtures}/noir-contract`; const contractSourcePath = `${contractProjectPath}/src/main.nr`; @@ -22,12 
+31,18 @@ export function getPaths(basePath: string) { const libCTOMLPath = `${libCProjectPath}/Nargo.toml`; return { + simpleScriptProjectPath, simpleScriptSourcePath, + simpleScriptTOMLPath, simpleScriptExpectedArtifact, + depsScriptProjectPath, depsScriptSourcePath, + depsScriptTOMLPath, depsScriptExpectedArtifact, libASourcePath, + libATOMLPath, libBSourcePath, + libBTOMLPath, contractProjectPath, contractSourcePath, contractTOMLPath, diff --git a/compiler/wasm/test/wasm/browser/index.test.ts b/compiler/wasm/test/wasm/browser/index.test.ts index 3122fa57945..b59b4ae417a 100644 --- a/compiler/wasm/test/wasm/browser/index.test.ts +++ b/compiler/wasm/test/wasm/browser/index.test.ts @@ -2,7 +2,7 @@ import { getPaths } from '../../shared'; import { expect } from '@esm-bundle/chai'; -import init, { compile, PathToFileSourceMap, compile_, CompilerContext } from '../../../build/esm'; +import init, { compile_program, PathToFileSourceMap, compile_program_, CompilerContext } from '../../../build/esm'; // @ts-ignore await init(); @@ -35,7 +35,7 @@ describe('noir wasm compilation', () => { it('matching nargos compilation', async () => { const sourceMap = new PathToFileSourceMap(); sourceMap.add_source_code('script/main.nr', await getFileAsString(simpleScriptSourcePath)); - const wasmCircuit = compile('script/main.nr', undefined, undefined, sourceMap); + const wasmCircuit = compile_program('script/main.nr', undefined, sourceMap); const cliCircuit = await getPrecompiledSource(simpleScriptExpectedArtifact); if (!('program' in wasmCircuit)) { @@ -58,9 +58,8 @@ describe('noir wasm compilation', () => { }); it('matching nargos compilation', async () => { - const wasmCircuit = compile( + const wasmCircuit = compile_program( 'script/main.nr', - false, { root_dependencies: ['lib_a'], library_dependencies: { @@ -132,9 +131,8 @@ describe('noir wasm compilation', () => { }).timeout(60 * 20e3); it('matching nargos compilation - context-implementation-compile-api', async () => { - const wasmCircuit = await compile_( + const wasmCircuit = await compile_program_( 'script/main.nr', - false, { root_dependencies: ['lib_a'], library_dependencies: { diff --git a/compiler/wasm/test/wasm/node/index.test.ts b/compiler/wasm/test/wasm/node/index.test.ts index c73ce7477e5..23c87cc059a 100644 --- a/compiler/wasm/test/wasm/node/index.test.ts +++ b/compiler/wasm/test/wasm/node/index.test.ts @@ -3,7 +3,7 @@ import { readFileSync } from 'fs'; import { join, resolve } from 'path'; import { expect } from 'chai'; -import { compile, PathToFileSourceMap, compile_, CompilerContext } from '../../../build/cjs'; +import { compile_program, PathToFileSourceMap, compile_program_, CompilerContext } from '../../../build/cjs'; const basePath = resolve(join(__dirname, '../../')); const { @@ -26,7 +26,7 @@ describe('noir wasm compilation', () => { it('matching nargos compilation', async () => { const sourceMap = new PathToFileSourceMap(); sourceMap.add_source_code(simpleScriptSourcePath, readFileSync(simpleScriptSourcePath, 'utf-8')); - const wasmCircuit = compile(simpleScriptSourcePath, undefined, undefined, sourceMap); + const wasmCircuit = compile_program(simpleScriptSourcePath, undefined, sourceMap); const cliCircuit = await getPrecompiledSource(simpleScriptExpectedArtifact); if (!('program' in wasmCircuit)) { @@ -49,9 +49,8 @@ describe('noir wasm compilation', () => { }); it('matching nargos compilation', async () => { - const wasmCircuit = compile( + const wasmCircuit = compile_program( 'script/main.nr', - false, { root_dependencies: 
['lib_a'], library_dependencies: { @@ -123,9 +122,8 @@ describe('noir wasm compilation', () => { }).timeout(60 * 20e3); it('matching nargos compilation - context-implementation-compile-api', async () => { - const wasmCircuit = await compile_( + const wasmCircuit = await compile_program_( 'script/main.nr', - false, { root_dependencies: ['lib_a'], library_dependencies: { diff --git a/compiler/wasm/tsconfig.json b/compiler/wasm/tsconfig.json index 6096b419d78..d2ae58b8fc9 100644 --- a/compiler/wasm/tsconfig.json +++ b/compiler/wasm/tsconfig.json @@ -16,5 +16,6 @@ "module": "CommonJS", "moduleResolution": "Node", "allowJs": true, - } + }, + "exclude": ["node_modules"] } \ No newline at end of file diff --git a/compiler/wasm/webpack.config.ts b/compiler/wasm/webpack.config.ts index d5d70df2b8a..456c5d82dca 100644 --- a/compiler/wasm/webpack.config.ts +++ b/compiler/wasm/webpack.config.ts @@ -1,6 +1,6 @@ import { resolve, join } from 'path'; import webpack from 'webpack'; -import 'webpack-dev-server'; +import type { Configuration as DevServerConfiguration } from 'webpack-dev-server'; import WasmPackPlugin from '@wasm-tool/wasm-pack-plugin'; import HtmlWebpackPlugin from 'html-webpack-plugin'; import CopyWebpackPlugin from 'copy-webpack-plugin'; @@ -25,6 +25,10 @@ const config: webpack.Configuration = { }, }; +const devServerConfig: DevServerConfiguration = { + static: join(__dirname, 'dist'), +}; + const webConfig: webpack.Configuration = { name: 'web', entry: './src/index.mts', @@ -74,9 +78,7 @@ const webConfig: webpack.Configuration = { }, ], }, - devServer: { - static: join(__dirname, 'dist'), - }, + devServer: devServerConfig, resolve: { ...config.resolve, alias: { diff --git a/cspell.json b/cspell.json index 12b1e3f63d3..a96e3de901a 100644 --- a/cspell.json +++ b/cspell.json @@ -13,12 +13,14 @@ "arithmetization", "arity", "arkworks", - "arraysort", + "backpropagate", + "Backpropagation", "barebones", "barretenberg", "bincode", "bindgen", "bitand", + "bitmask", "blackbox", "boilerplate", "boilerplates", @@ -38,6 +40,7 @@ "codegen", "codegenned", "codegens", + "codespace", "Codespaces", "codespan", "coeff", @@ -84,10 +87,12 @@ "higher-kinded", "Hindley-Milner", "idents", + "ilog", "impls", "indexmap", "injective", "Inlines", + "instrumenter", "interner", "intrinsics", "jmp", @@ -99,10 +104,12 @@ "keccak", "keccakf", "krate", + "libc", "losslessly", "lvalue", "Maddiaa", "mathbb", + "memfs", "merkle", "metas", "minreq", @@ -113,6 +120,7 @@ "monomorphizes", "monomorphizing", "montcurve", + "MSRV", "nand", "nargo", "neovim", @@ -157,6 +165,7 @@ "subshell", "subtyping", "swcurve", + "Taiko", "tecurve", "tempdir", "tempfile", diff --git a/deny.toml b/deny.toml index 5edce08fb70..72150f08a3c 100644 --- a/deny.toml +++ b/deny.toml @@ -54,10 +54,10 @@ allow = [ "LicenseRef-ring", # https://github.com/rustls/webpki/blob/main/LICENSE ISC Style "LicenseRef-rustls-webpki", - # bitmaps 2.1.0, generational-arena 0.2.9,im 15.1.0 + # bitmaps 2.1.0, im 15.1.0 "MPL-2.0", # Boost Software License - "BSL-1.0" + "BSL-1.0", ] # Allow 1 or more licenses on a per-crate basis, so that particular licenses @@ -93,7 +93,12 @@ unknown-registry = "warn" # Lint level for what to happen when a crate from a git repository that is not # in the allow list is encountered unknown-git = "deny" + +# DON'T YOU DARE ADD ANYTHING TO THIS IF YOU WANT TO PUBLISH ANYTHING NOIR RELATED TO CRATES.IO +# +# crates.io rejects git dependencies so anything depending on these is unpublishable and you'll ruin my day +# when I find out. 
allow-git = [ - "https://github.com/noir-lang/grumpkin", - "https://github.com/jfecher/chumsky" + "https://github.com/jfecher/chumsky", + "https://github.com/noir-lang/clap-markdown", ] diff --git a/docs/docs/getting_started/installation/other_install_methods.md b/docs/docs/getting_started/installation/other_install_methods.md index a532f83750e..a35e34aaf9c 100644 --- a/docs/docs/getting_started/installation/other_install_methods.md +++ b/docs/docs/getting_started/installation/other_install_methods.md @@ -1,38 +1,102 @@ --- title: Alternative Install Methods -description: - There are different ways to install Nargo, the one-stop shop and command-line tool for developing Noir programs. This guide explains other methods that don't rely on noirup, such as compiling from source, installing from binaries, and using WSL for windows +description: There are different ways to install Nargo, the one-stop shop and command-line tool for developing Noir programs. This guide explains other methods that don't rely on noirup, such as compiling from source, installing from binaries, and using WSL for windows keywords: [ - Installation - Nargo - Noirup - Binaries - Compiling from Source - WSL for Windows - macOS - Linux - Nix - Direnv - Shell & editor experience - Building and testing - Uninstalling Nargo - Noir vs code extension -] + Installation + Nargo + Noirup + Binaries + Compiling from Source + WSL for Windows + macOS + Linux + Nix + Direnv + Shell & editor experience + Building and testing + Uninstalling Nargo + Noir vs code extension, + ] sidebar_position: 1 --- +## Encouraged Installation Method: Noirup -## Installation +Noirup is the endorsed method for installing Nargo, streamlining the process of fetching binaries or compiling from source. It supports a range of options to cater to your specific needs, from nightly builds and specific versions to compiling from various sources. -The most common method of installing Nargo is through [Noirup](./index.md) +### Installing Noirup + +First, ensure you have `noirup` installed: + +```sh +curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +``` + +### Fetching Binaries + +With `noirup`, you can easily switch between different Nargo versions, including nightly builds: + +- **Nightly Version**: Install the latest nightly build. + + ```sh + noirup --version nightly + ``` + +- **Specific Version**: Install a specific version of Nargo. + ```sh + noirup --version + ``` + +### Compiling from Source + +`noirup` also enables compiling Nargo from various sources: + +- **From a Specific Branch**: Install from the latest commit on a branch. + + ```sh + noirup --branch + ``` + +- **From a Fork**: Install from the main branch of a fork. + + ```sh + noirup --repo + ``` + +- **From a Specific Branch in a Fork**: Install from a specific branch in a fork. + + ```sh + noirup --repo --branch + ``` + +- **From a Specific Pull Request**: Install from a specific PR. + + ```sh + noirup --pr + ``` + +- **From a Specific Commit**: Install from a specific commit. + + ```sh + noirup -C + ``` + +- **From Local Source**: Compile and install from a local directory. + ```sh + noirup --path ./path/to/local/source + ``` + +## Alternate Installation Methods (No Longer Recommended) + +While the following methods are available, they are no longer recommended. We advise using noirup for a more efficient and flexible installation experience. 
However, there are other methods for installing Nargo: -- [Binaries](#binaries) -- [Compiling from Source](#compile-from-source) -- [WSL for Windows](#wsl-for-windows) +- [Binaries](#option-1-installing-from-binaries) +- [Compiling from Source](#option-2-compile-from-source) +- [WSL for Windows](#option-3-wsl-for-windows) -### Binaries +### Option 1: Installing from Binaries See [GitHub Releases](https://github.com/noir-lang/noir/releases) for the latest and previous platform specific binaries. @@ -48,7 +112,7 @@ Paste and run the following in the terminal to extract and install the binary: ```bash mkdir -p $HOME/.nargo/bin && \ -curl -o $HOME/.nargo/bin/nargo-aarch64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.6.0/nargo-aarch64-apple-darwin.tar.gz && \ +curl -o $HOME/.nargo/bin/nargo-aarch64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.24.0/nargo-aarch64-apple-darwin.tar.gz && \ tar -xvf $HOME/.nargo/bin/nargo-aarch64-apple-darwin.tar.gz -C $HOME/.nargo/bin/ && \ echo '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.zshrc && \ source ~/.zshrc @@ -58,7 +122,7 @@ source ~/.zshrc ```bash mkdir -p $HOME/.nargo/bin && \ -curl -o $HOME/.nargo/bin/nargo-x86_64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.6.0/nargo-x86_64-apple-darwin.tar.gz && \ +curl -o $HOME/.nargo/bin/nargo-x86_64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.24.0/nargo-x86_64-apple-darwin.tar.gz && \ tar -xvf $HOME/.nargo/bin/nargo-x86_64-apple-darwin.tar.gz -C $HOME/.nargo/bin/ && \ echo '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.zshrc && \ source ~/.zshrc @@ -68,7 +132,7 @@ source ~/.zshrc ```bash mkdir -p $HOME/.nargo/bin && \ -curl -o $HOME/.nargo/bin/nargo-x86_64-unknown-linux-gnu.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.6.0/nargo-x86_64-unknown-linux-gnu.tar.gz && \ +curl -o $HOME/.nargo/bin/nargo-x86_64-unknown-linux-gnu.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.24.0/nargo-x86_64-unknown-linux-gnu.tar.gz && \ tar -xvf $HOME/.nargo/bin/nargo-x86_64-unknown-linux-gnu.tar.gz -C $HOME/.nargo/bin/ && \ echo -e '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.bashrc && \ source ~/.bashrc @@ -81,7 +145,7 @@ Check if the installation was successful by running `nargo --version`. You shoul > **macOS:** If you are prompted with an OS alert, right-click and open the _nargo_ executable from > Finder. Close the new terminal popped up and `nargo` should now be accessible. -### Option 3: Compile from Source +### Option 2: Compile from Source Due to the large number of native dependencies, Noir projects uses [Nix](https://nixos.org/) and [direnv](https://direnv.net/) to streamline the development experience. It helps mitigating issues commonly associated with dependency management, such as conflicts between required package versions for different projects (often referred to as "dependency hell"). @@ -148,7 +212,7 @@ code . #### Building and testing Assuming you are using `direnv` to populate your environment, building and testing the project can be done -with the typical `cargo build`, `cargo test`, and `cargo clippy` commands. You'll notice that the `cargo` version matches the version we specify in `rust-toolchain.toml`, which is 1.71.1 at the time of this writing. +with the typical `cargo build`, `cargo test`, and `cargo clippy` commands. 
You'll notice that the `cargo` version matches the version we specify in `rust-toolchain.toml`, which is 1.73.0 at the time of this writing. If you want to build the entire project in an isolated sandbox, you can use Nix commands: @@ -161,19 +225,19 @@ If you have hesitations with using direnv, you can launch a subshell with `nix d Advanced: If you aren't using direnv nor launching your editor within the subshell, you can try to install Barretenberg and other global dependencies the package needs. This is an advanced workflow and likely won't receive support! -### Option 4: WSL (for Windows) +### Option 3: WSL (for Windows) The default backend for Noir (Barretenberg) doesn't provide Windows binaries at this time. For that reason, Noir cannot be installed natively. However, it is available by using Windows Subsystem for Linux (WSL). Step 1: Follow the instructions [here](https://learn.microsoft.com/en-us/windows/wsl/install) to install and run WSL. -step 2: Follow the [Noirup instructions](./index.md). +step 2: Follow the [Noirup instructions](#encouraged-installation-method-noirup). ## Uninstalling Nargo ### Noirup -If you installed Noir with `noirup`, you can uninstall Noir by removing the files in `~/.nargo`, `~/nargo` and `~/noir_cache`. +If you installed Nargo with `noirup` or through directly downloading binaries, you can uninstall Nargo by removing the files in `~/.nargo`, `~/nargo`, and `~/noir_cache`. This ensures that all installed binaries, configurations, and cache related to Nargo are fully removed from your system. ```bash rm -r ~/.nargo @@ -183,7 +247,7 @@ rm -r ~/noir_cache ### Nix -If you installed Noir with Nix or from source, you can remove the binary located at `~/.nix-profile/bin/nargo`. +If you installed Nargo with Nix or compiled it from source, you can remove the binary located at `~/.nix-profile/bin/nargo`. ```bash rm ~/.nix-profile/bin/nargo diff --git a/docs/docs/how_to/how-to-recursion.md b/docs/docs/how_to/how-to-recursion.md index f34647a99d5..4c45bb87ae2 100644 --- a/docs/docs/how_to/how-to-recursion.md +++ b/docs/docs/how_to/how-to-recursion.md @@ -42,9 +42,9 @@ In short: ::: -In a standard recursive app, you're also dealing with at least two circuits. For the purpose of this guide, we will assume these two: +In a standard recursive app, you're also dealing with at least two circuits. For the purpose of this guide, we will assume the following: -- `main`: a circuit of type `assert(x != y)` +- `main`: a circuit of type `assert(x != y)`, where `main` is marked with a `#[recursive]` attribute. This attribute states that the backend should generate proofs that are friendly for verification within another circuit. - `recursive`: a circuit that verifies `main` For a full example on how recursive proofs work, please refer to the [noir-examples](https://github.com/noir-lang/noir-examples) repository. We will *not* be using it as a reference for this guide. 
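To make this setup concrete, below is a minimal sketch (not taken from the noir-examples repository) of what the `main` circuit described above could look like; the only recursion-specific piece is the `#[recursive]` attribute:

```rust
// Hypothetical `main` circuit for this guide: it simply proves that x != y.
// The #[recursive] attribute tells the backend to generate proofs that are
// cheap to verify from within another circuit.
#[recursive]
fn main(x: Field, y: pub Field) {
    assert(x != y);
}
```

The `recursive` circuit is then an ordinary Noir program whose job is to verify a proof of `main`, as covered in the standard library's recursion documentation.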
@@ -77,7 +77,7 @@ const { witness } = noir.execute(input) With this witness, you are now able to generate the intermediate proof for the main circuit: ```js -const { proof, publicInputs } = await backend.generateIntermediateProof(witness) +const { proof, publicInputs } = await backend.generateProof(witness) ``` :::warning @@ -95,13 +95,13 @@ With this in mind, it becomes clear that our intermediate proof is the one *mean Optionally, you are able to verify the intermediate proof: ```js -const verified = await backend.verifyIntermediateProof({ proof, publicInputs }) +const verified = await backend.verifyProof({ proof, publicInputs }) ``` -This can be useful to make sure our intermediate proof was correctly generated. But the real goal is to do it within another circuit. For that, we need to generate the intermediate artifacts: +This can be useful to make sure our intermediate proof was correctly generated. But the real goal is to do it within another circuit. For that, we need to generate recursive proof artifacts that will be passed to the circuit that is verifying the proof we just generated. Instead of passing the proof and verification key as a byte array, we pass them as fields which makes it cheaper to verify in a circuit: ```js -const { proofAsFields, vkAsFields, vkHash } = await backend.generateIntermediateProofArtifacts( { publicInputs, proof }, publicInputsCount) +const { proofAsFields, vkAsFields, vkHash } = await backend.generateRecursiveProofArtifacts( { publicInputs, proof }, publicInputsCount) ``` This call takes the public inputs and the proof, but also the public inputs count. While this is easily retrievable by simply counting the `publicInputs` length, the backend interface doesn't currently abstract it away. @@ -135,8 +135,8 @@ const recursiveInputs = { } const { witness, returnValue } = noir.execute(recursiveInputs) // we're executing the recursive circuit now! -const { proof, publicInputs } = backend.generateFinalProof(witness) -const verified = backend.verifyFinalProof({ proof, publicInputs }) +const { proof, publicInputs } = backend.generateProof(witness) +const verified = backend.verifyProof({ proof, publicInputs }) ``` You can obviously chain this proof into another proof. In fact, if you're using recursive proofs, you're probably interested of using them this way! @@ -165,15 +165,15 @@ This allows you to neatly call exactly the method you want without conflicting n ```js // Alice runs this 👇 const { witness: mainWitness } = await noir_programs.main.execute(input) -const proof = await backends.main.generateIntermediateProof(mainWitness) +const proof = await backends.main.generateProof(mainWitness) // Bob runs this 👇 -const verified = await backends.main.verifyIntermediateProof(proof) -const { proofAsFields, vkAsFields, vkHash } = await backends.main.generateIntermediateProofArtifacts( +const verified = await backends.main.verifyProof(proof) +const { proofAsFields, vkAsFields, vkHash } = await backends.main.generateRecursiveProofArtifacts( proof, numPublicInputs, ); -const recursiveProof = await noir_programs.recursive.generateFinalProof(recursiveInputs) +const recursiveProof = await noir_programs.recursive.generateProof(recursiveInputs) ``` ::: diff --git a/docs/docs/index.mdx b/docs/docs/index.mdx index 2cec2397051..75086ddcdde 100644 --- a/docs/docs/index.mdx +++ b/docs/docs/index.mdx @@ -34,7 +34,7 @@ Noir works differently from most ZK languages by taking a two-pronged path. 
Firs :::info -Noir is backend agnostic, which means it makes no assumptions on which proving backend powers the ZK proof. Being the language that powers [Aztec Contracts](https://docs.aztec.network/dev_docs/contracts/main), it defaults to Aztec's Barretenberg proving backend. +Noir is backend agnostic, which means it makes no assumptions on which proving backend powers the ZK proof. Being the language that powers [Aztec Contracts](https://docs.aztec.network/developers/contracts/main), it defaults to Aztec's Barretenberg proving backend. However, the ACIR output can be transformed to be compatible with other PLONK-based backends, or into a [rank-1 constraint system](https://www.rareskills.io/post/rank-1-constraint-system) suitable for backends such as Arkwork's Marlin. @@ -48,7 +48,7 @@ Noir can be used both in complex cloud-based backends and in user's smartphones, Noir Logo - Aztec Contracts leverage Noir to allow for the storage and execution of private information. Writing an Aztec Contract is as easy as writing Noir, and Aztec developers can easily interact with the network storage and execution through the [Aztec.nr](https://docs.aztec.network/dev_docs/contracts/main) library. + Aztec Contracts leverage Noir to allow for the storage and execution of private information. Writing an Aztec Contract is as easy as writing Noir, and Aztec developers can easily interact with the network storage and execution through the [Aztec.nr](https://docs.aztec.network/developers/contracts/main) library. Soliditry Verifier Example diff --git a/docs/docs/migration_notes.md b/docs/docs/migration_notes.md index 9f27230a1a0..6bd740024e5 100644 --- a/docs/docs/migration_notes.md +++ b/docs/docs/migration_notes.md @@ -6,6 +6,20 @@ keywords: [Noir, notes, migration, updating, upgrading] Noir is in full-speed development. Things break fast, wild, and often. This page attempts to leave some notes on errors you might encounter when upgrading and how to resolve them until proper patches are built. +### `backend encountered an error: libc++.so.1` + +Depending on your OS, you may encounter the following error when running `nargo prove` for the first time: + +```text +The backend encountered an error: "/home/codespace/.nargo/backends/acvm-backend-barretenberg/backend_binary: error while loading shared libraries: libc++.so.1: cannot open shared object file: No such file or directory\n" +``` + +Install the `libc++-dev` library with: + +```bash +sudo apt install libc++-dev +``` + ## ≥0.19 ### Enforcing `compiler_version` diff --git a/docs/docs/noir/concepts/assert.md b/docs/docs/noir/concepts/assert.md index c5f9aff139c..bcff613a695 100644 --- a/docs/docs/noir/concepts/assert.md +++ b/docs/docs/noir/concepts/assert.md @@ -18,10 +18,28 @@ fn main(x : Field, y : Field) { } ``` +> Assertions only work for predicate operations, such as `==`. If there's any ambiguity on the operation, the program will fail to compile. For example, it is unclear if `assert(x + y)` would check for `x + y == 0` or simply would return `true`. + You can optionally provide a message to be logged when the assertion fails: ```rust assert(x == y, "x and y are not equal"); ``` -> Assertions only work for predicate operations, such as `==`. If there's any ambiguity on the operation, the program will fail to compile. For example, it is unclear if `assert(x + y)` would check for `x + y == 0` or simply would return `true`. 
+Aside string literals, the optional message can be a format string or any other type supported as input for Noir's [print](../standard_library/logging.md) functions. This feature lets you incorporate runtime variables into your failed assertion logs: + +```rust +assert(x == y, f"Expected x == y, but got {x} == {y}"); +``` + +Using a variable as an assertion message directly: + +```rust +struct myStruct { + myField: Field +} + +let s = myStruct { myField: y }; +assert(s.myField == x, s); +``` + diff --git a/docs/docs/noir/concepts/data_types/fields.md b/docs/docs/noir/concepts/data_types/fields.md index 7870c98c858..99b4aa63549 100644 --- a/docs/docs/noir/concepts/data_types/fields.md +++ b/docs/docs/noir/concepts/data_types/fields.md @@ -181,3 +181,12 @@ Parity of (prime) Field element, i.e. sgn0(x mod p) = 0 if x ∈ \{0, ..., p-1\} ```rust fn sgn0(self) -> u1 ``` + + +### lt + +Returns true if the field is less than the other field + +```rust +pub fn lt(self, another: Field) -> bool +``` diff --git a/docs/docs/noir/concepts/data_types/index.md b/docs/docs/noir/concepts/data_types/index.md index 3c9cd4c2437..97b3b2cb094 100644 --- a/docs/docs/noir/concepts/data_types/index.md +++ b/docs/docs/noir/concepts/data_types/index.md @@ -91,6 +91,20 @@ fn main() { } ``` +Type aliases can even refer to other aliases. An error will be issued if they form a cycle: + +```rust +// Ok! +type A = B; +type B = Field; + +type Bad1 = Bad2; + +// error: Dependency cycle found +type Bad2 = Bad1; +// ^^^^^^^^^^^ 'Bad2' recursively depends on itself: Bad2 -> Bad1 -> Bad2 +``` + ### BigInt You can achieve BigInt functionality using the [Noir BigInt](https://github.com/shuklaayush/noir-bigint) library. diff --git a/docs/docs/noir/concepts/data_types/integers.md b/docs/docs/noir/concepts/data_types/integers.md index 7d1e83cf4e9..4d58d96fed5 100644 --- a/docs/docs/noir/concepts/data_types/integers.md +++ b/docs/docs/noir/concepts/data_types/integers.md @@ -5,7 +5,7 @@ keywords: [noir, integer types, methods, examples, arithmetic] sidebar_position: 1 --- -An integer type is a range constrained field type. The Noir frontend supports arbitrarily-sized, both unsigned and signed integer types. +An integer type is a range constrained field type. The Noir frontend supports both unsigned and signed integer types. The allowed sizes are 1, 8, 32 and 64 bits. :::info @@ -45,11 +45,53 @@ fn main() { The bit size determines the maximum and minimum range of value the integer type can store. For example, an `i8` variable can store a value in the range of -128 to 127 (i.e. $\\-2^{7}\\$ to $\\2^{7}-1\\$). -:::tip +## 128 bits Unsigned Integers -If you are using the default proving backend with Noir, both even (e.g. _u2_, _i2_) and odd (e.g. _u3_, _i3_) arbitrarily-sized integer types up to 127 bits (i.e. _u127_ and _i127_) are supported. +The built-in structure `U128` allows you to use 128-bit unsigned integers almost like a native integer type. However, there are some differences to keep in mind: +- You cannot cast between a native integer and `U128` +- There is a higher performance cost when using `U128`, compared to a native type. -::: +Conversion between unsigned integer types and U128 are done through the use of `from_integer` and `to_integer` functions. + +```rust +fn main() { + let x = U128::from_integer(23); + let y = U128::from_hex("0x7"); + let z = x + y; + assert(z.to_integer() == 30); +} +``` + +`U128` is implemented with two 64 bits limbs, representing the low and high bits, which explains the performance cost. 
You should expect `U128` to be twice more costly for addition and four times more costly for multiplication. +You can construct a U128 from its limbs: +```rust +fn main(x: u64, y: u64) { + let x = U128::from_u64s_be(x,y); + assert(z.hi == x as Field); + assert(z.lo == y as Field); +} +``` + +Note that the limbs are stored as Field elements in order to avoid unnecessary conversions. +Apart from this, most operations will work as usual: + +```rust +fn main(x: U128, y: U128) { + // multiplication + let c = x * y; + // addition and subtraction + let c = c - x + y; + // division + let c = x / y; + // bit operation; + let c = x & y | y; + // bit shift + let c = x << y; + // comparisons; + let c = x < y; + let c = x == y; +} +``` ## Overflows @@ -108,6 +150,6 @@ Example of how it is used: use dep::std; fn main(x: u8, y: u8) -> pub u8 { - std::wrapping_add(x + y) + std::wrapping_add(x, y) } ``` diff --git a/docs/docs/noir/concepts/globals.md b/docs/docs/noir/concepts/globals.md new file mode 100644 index 00000000000..063a3d89248 --- /dev/null +++ b/docs/docs/noir/concepts/globals.md @@ -0,0 +1,72 @@ +--- +title: Global Variables +description: + Learn about global variables in Noir. Discover how + to declare, modify, and use them in your programs. +keywords: [noir programming language, globals, global variables, constants] +sidebar_position: 8 +--- + +## Globals + + +Noir supports global variables. The global's type can be inferred by the compiler entirely: + +```rust +global N = 5; // Same as `global N: Field = 5` + +global TUPLE = (3, 2); + +fn main() { + assert(N == 5); + assert(N == TUPLE.0 + TUPLE.1); +} +``` + +:::info + +Globals can be defined as any expression, so long as they don't depend on themselves - otherwise there would be a dependency cycle! For example: + +```rust +global T = foo(T); // dependency error +``` + +::: + + +If they are initialized to a literal integer, globals can be used to specify an array's length: + +```rust +global N: Field = 2; + +fn main(y : [Field; N]) { + assert(y[0] == y[1]) +} +``` + +A global from another module can be imported or referenced externally like any other name: + +```rust +global N = 20; + +fn main() { + assert(my_submodule::N != N); +} + +mod my_submodule { + global N: Field = 10; +} +``` + +When a global is used, Noir replaces the name with its definition on each occurrence. +This means globals defined using function calls will repeat the call each time they're used: + +```rust +global RESULT = foo(); + +fn foo() -> [Field; 100] { ... } +``` + +This is usually fine since Noir will generally optimize any function call that does not +refer to a program input into a constant. It should be kept in mind however, if the called +function performs side-effects like `println`, as these will still occur on each use. diff --git a/docs/docs/noir/concepts/mutability.md b/docs/docs/noir/concepts/mutability.md index 9cc10429cb4..fdeef6a87c5 100644 --- a/docs/docs/noir/concepts/mutability.md +++ b/docs/docs/noir/concepts/mutability.md @@ -1,9 +1,9 @@ --- title: Mutability description: - Learn about mutable variables, constants, and globals in Noir programming language. Discover how + Learn about mutable variables in Noir. Discover how to declare, modify, and use them in your programs. 
-keywords: [noir programming language, mutability in noir, mutable variables, constants, globals] +keywords: [noir programming language, mutability in noir, mutable variables] sidebar_position: 8 --- @@ -49,45 +49,73 @@ fn helper(mut x: i32) { } ``` -## Comptime Values +## Non-local mutability -:::warning +Non-local mutability can be achieved through the mutable reference type `&mut T`: -The 'comptime' keyword was removed in version 0.10. The comptime keyword and syntax are currently still kept and parsed for backwards compatibility, but are now deprecated and will issue a warning when used. `comptime` has been removed because it is no longer needed for accessing arrays. - -::: +```rust +fn set_to_zero(x: &mut Field) { + *x = 0; +} -## Globals +fn main() { + let mut y = 42; + set_to_zero(&mut y); + assert(*y == 0); +} +``` -Noir also supports global variables. However, they must be known at compile-time. The global type can also be inferred by the compiler entirely. Globals can also be used to specify array -annotations for function parameters and can be imported from submodules. +When creating a mutable reference, the original variable being referred to (`y` in this +example) must also be mutable. Since mutable references are a reference type, they must +be explicitly dereferenced via `*` to retrieve the underlying value. Note that this yields +a copy of the value, so mutating this copy will not change the original value behind the +reference: ```rust -global N: Field = 5; // Same as `global N: Field = 5` +fn main() { + let mut x = 1; + let x_ref = &mut x; + + let mut y = *x_ref; + let y_ref = &mut y; -fn main(x : Field, y : [Field; N]) { - let res = x * N; + x = 2; + *x_ref = 3; - assert(res == y[0]); + y = 4; + *y_ref = 5; - let res2 = x * my_submodule::N; - assert(res != res2); + assert(x == 3); + assert(*x_ref == 3); + assert(y == 5); + assert(*y_ref == 5); } +``` -mod my_submodule { - use dep::std; +Note that types in Noir are actually deeply immutable so the copy that occurs when +dereferencing is only a conceptual copy - no additional constraints will occur. - global N: Field = 10; +Mutable references can also be stored within structs. Note that there is also +no lifetime parameter on these unlike rust. This is because the allocated memory +always lasts the entire program - as if it were an array of one element. - fn my_helper() -> Field { - let x = N; - x +```rust +struct Foo { + x: &mut Field +} + +impl Foo { + fn incr(mut self) { + *self.x += 1; } } -``` -## Why only local mutability? +fn main() { + let foo = Foo { x: &mut 0 }; + foo.incr(); + assert(*foo.x == 1); +} +``` -Witnesses in a proving system are immutable in nature. Noir aims to _closely_ mirror this setting -without applying additional overhead to the user. Modeling a mutable reference is not as -straightforward as on conventional architectures and would incur some possibly unexpected overhead. +In general, you should avoid non-local & shared mutability unless it is needed. Sticking +to only local mutability will improve readability and potentially improve compiler optimizations as well. 
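To illustrate that advice, the same result can often be achieved with purely local mutability by returning a value instead of writing through a reference; a small sketch (the `sum` helper is hypothetical):

```rust
// Local mutability only: `total` is mutated inside `sum` and the result is
// returned to the caller, rather than being written through a &mut reference.
fn sum(values: [Field; 3]) -> Field {
    let mut total = 0;
    for i in 0..3 {
        total += values[i];
    }
    total
}

fn main(values: [Field; 3], expected: pub Field) {
    assert(sum(values) == expected);
}
```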
diff --git a/docs/docs/noir/concepts/unconstrained.md b/docs/docs/noir/concepts/unconstrained.md index 6b3424f7993..89d12c1c971 100644 --- a/docs/docs/noir/concepts/unconstrained.md +++ b/docs/docs/noir/concepts/unconstrained.md @@ -40,7 +40,7 @@ Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 91 Backend circuit size: 3619 ``` -A lot of the operations in this function are optimized away by the compiler (all the bit-shifts turn into divisions by constants). However we can save a bunch of gates by casting to u8 a bit earlier. This automatically truncates the bit-shifted value to fit in a u8 which allows us to remove the XOR against 0xff. This saves us ~480 gates in total. +A lot of the operations in this function are optimized away by the compiler (all the bit-shifts turn into divisions by constants). However we can save a bunch of gates by casting to u8 a bit earlier. This automatically truncates the bit-shifted value to fit in a u8 which allows us to remove the AND against 0xff. This saves us ~480 gates in total. ```rust fn main(num: u72) -> pub [u8; 8] { diff --git a/docs/docs/noir/modules_packages_crates/crates_and_packages.md b/docs/docs/noir/modules_packages_crates/crates_and_packages.md index 760a463094c..95ee9f52ab2 100644 --- a/docs/docs/noir/modules_packages_crates/crates_and_packages.md +++ b/docs/docs/noir/modules_packages_crates/crates_and_packages.md @@ -24,7 +24,7 @@ _Library crates_ don't have a `main` function and they don't compile down to ACI #### Contracts -Contract crates are similar to binary crates in that they compile to ACIR which you can create proofs against. They are different in that they do not have a single `main` function, but are a collection of functions to be deployed to the [Aztec network](https://aztec.network). You can learn more about the technical details of Aztec in the [monorepo](https://github.com/AztecProtocol/aztec-packages) or contract [examples](https://github.com/AztecProtocol/aztec-packages/tree/master/yarn-project/noir-contracts/contracts). +Contract crates are similar to binary crates in that they compile to ACIR which you can create proofs against. They are different in that they do not have a single `main` function, but are a collection of functions to be deployed to the [Aztec network](https://aztec.network). You can learn more about the technical details of Aztec in the [monorepo](https://github.com/AztecProtocol/aztec-packages) or contract [examples](https://github.com/AztecProtocol/aztec-packages/tree/master/noir-projects/noir-contracts/contracts). 
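As a small illustration of the binary/library distinction above (file paths are only examples), a binary crate's `src/main.nr` provides an entry point that compiles to ACIR:

```rust
// src/main.nr in a binary crate: the `main` entry point compiles to ACIR,
// so proofs can be created against it.
fn main(x: Field, y: pub Field) {
    assert(x != y);
}
```

A library crate would instead expose plain functions from `src/lib.nr` and have no `main` at all.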
### Crate Root diff --git a/docs/docs/noir/modules_packages_crates/dependencies.md b/docs/docs/noir/modules_packages_crates/dependencies.md index a37dc401b7d..04c1703d929 100644 --- a/docs/docs/noir/modules_packages_crates/dependencies.md +++ b/docs/docs/noir/modules_packages_crates/dependencies.md @@ -35,7 +35,7 @@ If the module is in a subdirectory, you can define a subdirectory in your git re # Nargo.toml [dependencies] -easy_private_token_contract = {tag ="v0.1.0-alpha62", git = "https://github.com/AztecProtocol/aztec-packages", directory = "yarn-project/noir-contracts/contracts/easy_private_token_contract"} +easy_private_token_contract = {tag ="v0.1.0-alpha62", git = "https://github.com/AztecProtocol/aztec-packages", directory = "noir-contracts/contracts/easy_private_token_contract"} ``` ## Specifying a local dependency diff --git a/docs/docs/noir/standard_library/black_box_fns.md b/docs/docs/noir/standard_library/black_box_fns.md index 6b22d0e7466..eae8744abf0 100644 --- a/docs/docs/noir/standard_library/black_box_fns.md +++ b/docs/docs/noir/standard_library/black_box_fns.md @@ -15,7 +15,7 @@ Here is a list of the current black box functions: - [SHA256](./cryptographic_primitives/hashes#sha256) - [Schnorr signature verification](./cryptographic_primitives/schnorr) - [Blake2s](./cryptographic_primitives/hashes#blake2s) -- [Blake3](./cryptographic_primitives/hashes#blake2s) +- [Blake3](./cryptographic_primitives/hashes#blake3) - [Pedersen Hash](./cryptographic_primitives/hashes#pedersen_hash) - [Pedersen Commitment](./cryptographic_primitives/hashes#pedersen_commitment) - [ECDSA signature verification](./cryptographic_primitives/ecdsa_sig_verification) diff --git a/docs/docs/noir/standard_library/bn254.md b/docs/docs/noir/standard_library/bn254.md new file mode 100644 index 00000000000..3294f005dbb --- /dev/null +++ b/docs/docs/noir/standard_library/bn254.md @@ -0,0 +1,46 @@ +--- +title: Bn254 Field Library +--- + +Noir provides a module in standard library with some optimized functions for bn254 Fr in `std::field::bn254`. + +## decompose + +```rust +fn decompose(x: Field) -> (Field, Field) {} +``` + +Decomposes a single field into two fields, low and high. The low field contains the lower 16 bytes of the input field and the high field contains the upper 16 bytes of the input field. Both field results are range checked to 128 bits. + + +## assert_gt + +```rust +fn assert_gt(a: Field, b: Field) {} +``` + +Asserts that a > b. This will generate less constraints than using `assert(gt(a, b))`. + +## assert_lt + +```rust +fn assert_lt(a: Field, b: Field) {} +``` + +Asserts that a < b. This will generate less constraints than using `assert(lt(a, b))`. + +## gt + +```rust +fn gt(a: Field, b: Field) -> bool {} +``` + +Returns true if a > b. + +## lt + +```rust +fn lt(a: Field, b: Field) -> bool {} +``` + +Returns true if a < b. \ No newline at end of file diff --git a/docs/docs/noir/standard_library/containers/boundedvec.md b/docs/docs/noir/standard_library/containers/boundedvec.md new file mode 100644 index 00000000000..cd0f725f870 --- /dev/null +++ b/docs/docs/noir/standard_library/containers/boundedvec.md @@ -0,0 +1,210 @@ +--- +title: Bounded Vectors +keywords: [noir, vector, bounded vector, slice] +sidebar_position: 1 +--- + +A `BoundedVec` is a growable storage similar to a `Vec` except that it +is bounded with a maximum possible length. 
Unlike `Vec`, `BoundedVec` is not implemented +via slices and thus is not subject to the same restrictions slices are (notably, nested +slices - and thus nested vectors as well - are disallowed). + +Since a BoundedVec is backed by a normal array under the hood, growing the BoundedVec by +pushing an additional element is also more efficient - the length only needs to be increased +by one. + +For these reasons `BoundedVec` should generally be preferred over `Vec` when there +is a reasonable maximum bound that can be placed on the vector. + +Example: + +```rust +let mut vector: BoundedVec = BoundedVec::new(); +for i in 0..5 { + vector.push(i); +} +assert(vector.len() == 5); +assert(vector.max_len() == 10); +``` + +## Methods + +### new + +```rust +pub fn new() -> Self +``` + +Creates a new, empty vector of length zero. + +Since this container is backed by an array internally, it still needs an initial value +to give each element. To resolve this, each element is zeroed internally. This value +is guaranteed to be inaccessible unless `get_unchecked` is used. + +Example: + +```rust +let empty_vector: BoundedVec = BoundedVec::new(); +assert(empty_vector.len() == 0); +``` + +Note that whenever calling `new` the maximum length of the vector should always be specified +via a type signature: + +#include_code new_example test_programs/noir_test_success/bounded_vec/src/main.nr rust + +This defaulting of `MaxLen` (and numeric generics in general) to zero may change in future noir versions +but for now make sure to use type annotations when using bounded vectors. Otherwise, you will receive a constraint failure at runtime when the vec is pushed to. + +### get + +```rust +pub fn get(mut self: Self, index: u64) -> T { +``` + +Retrieves an element from the vector at the given index, starting from zero. + +If the given index is equal to or greater than the length of the vector, this +will issue a constraint failure. + +Example: + +```rust +fn foo(v: BoundedVec) { + let first = v.get(0); + let last = v.get(v.len() - 1); + assert(first != last); +} +``` + +### get_unchecked + +```rust +pub fn get_unchecked(mut self: Self, index: u64) -> T { +``` + +Retrieves an element from the vector at the given index, starting from zero, without +performing a bounds check. + +Since this function does not perform a bounds check on length before accessing the element, +it is unsafe! Use at your own risk! + +Example: + +#include_code get_unchecked_example test_programs/noir_test_success/bounded_vec/src/main.nr rust + + +### push + +```rust +pub fn push(&mut self, elem: T) { +``` + +Pushes an element to the end of the vector. This increases the length +of the vector by one. + +Panics if the new length of the vector will be greater than the max length. + +Example: + +#include_code bounded-vec-push-example test_programs/noir_test_success/bounded_vec/src/main.nr rust + +### pop + +```rust +pub fn pop(&mut self) -> T +``` + +Pops the element at the end of the vector. This will decrease the length +of the vector by one. + +Panics if the vector is empty. + +Example: + +#include_code bounded-vec-pop-example test_programs/noir_test_success/bounded_vec/src/main.nr rust + +### len + +```rust +pub fn len(self) -> u64 { +``` + +Returns the current length of this vector + +Example: + +#include_code bounded-vec-len-example test_programs/noir_test_success/bounded_vec/src/main.nr rust + +### max_len + +```rust +pub fn max_len(_self: BoundedVec) -> u64 { +``` + +Returns the maximum length of this vector. 
This is always +equal to the `MaxLen` parameter this vector was initialized with. + +Example: + +#include_code bounded-vec-max-len-example test_programs/noir_test_success/bounded_vec/src/main.nr rust + +### storage + +```rust +pub fn storage(self) -> [T; MaxLen] { +``` + +Returns the internal array within this vector. +Since arrays in Noir are immutable, mutating the returned storage array will not mutate +the storage held internally by this vector. + +Note that uninitialized elements may be zeroed out! + +Example: + +#include_code bounded-vec-storage-example test_programs/noir_test_success/bounded_vec/src/main.nr rust + +### extend_from_array + +```rust +pub fn extend_from_array(&mut self, array: [T; Len]) +``` + +Pushes each element from the given array to this vector. + +Panics if pushing each element would cause the length of this vector +to exceed the maximum length. + +Example: + +#include_code bounded-vec-extend-from-array-example test_programs/noir_test_success/bounded_vec/src/main.nr rust + +### extend_from_bounded_vec + +```rust +pub fn extend_from_bounded_vec(&mut self, vec: BoundedVec) +``` + +Pushes each element from the other vector to this vector. The length of +the other vector is left unchanged. + +Panics if pushing each element would cause the length of this vector +to exceed the maximum length. + +Example: + +#include_code bounded-vec-extend-from-bounded-vec-example test_programs/noir_test_success/bounded_vec/src/main.nr rust + +### any + +```rust +pub fn any(self, predicate: fn[Env](T) -> bool) -> bool +``` + +Returns true if the given predicate returns true for any element +in this vector. + +Example: + +#include_code bounded-vec-any-example test_programs/noir_test_success/bounded_vec/src/main.nr rust diff --git a/docs/docs/noir/standard_library/containers/vec.mdx b/docs/docs/noir/standard_library/containers/vec.mdx new file mode 100644 index 00000000000..1954f05bc76 --- /dev/null +++ b/docs/docs/noir/standard_library/containers/vec.mdx @@ -0,0 +1,151 @@ +--- +title: Vectors +description: Delve into the Vec data type in Noir. Learn about its methods, practical examples, and best practices for using Vectors in your Noir code. +keywords: [noir, vector type, methods, examples, dynamic arrays] +sidebar_position: 6 +--- + +import Experimental from '@site/src/components/Notes/_experimental.mdx'; + + + +A vector is a collection type similar to Rust's `Vec` type. In Noir, it is a convenient way to use slices as mutable arrays. + +Example: + +```rust +let mut vector: Vec = Vec::new(); +for i in 0..5 { + vector.push(i); +} +assert(vector.len() == 5); +``` + +## Methods + +### new + +Creates a new, empty vector. + +```rust +pub fn new() -> Self +``` + +Example: + +```rust +let empty_vector: Vec = Vec::new(); +assert(empty_vector.len() == 0); +``` + +### from_slice + +Creates a vector containing each element from a given slice. Mutations to the resulting vector will not affect the original slice. + +```rust +pub fn from_slice(slice: [T]) -> Self +``` + +Example: + +```rust +let arr: [Field] = [1, 2, 3]; +let vector_from_slice = Vec::from_slice(arr); +assert(vector_from_slice.len() == 3); +``` + +### len + +Returns the number of elements in the vector. + +```rust +pub fn len(self) -> Field +``` + +Example: + +```rust +let empty_vector: Vec = Vec::new(); +assert(empty_vector.len() == 0); +``` + +### get + +Retrieves an element from the vector at a given index. Panics if the index points beyond the vector's end. 
+ +```rust +pub fn get(self, index: Field) -> T +``` + +Example: + +```rust +let vector: Vec = Vec::from_slice([10, 20, 30]); +assert(vector.get(1) == 20); +``` + +### push + +Adds a new element to the vector's end, returning a new vector with a length one greater than the original unmodified vector. + +```rust +pub fn push(&mut self, elem: T) +``` + +Example: + +```rust +let mut vector: Vec = Vec::new(); +vector.push(10); +assert(vector.len() == 1); +``` + +### pop + +Removes an element from the vector's end, returning a new vector with a length one less than the original vector, along with the removed element. Panics if the vector's length is zero. + +```rust +pub fn pop(&mut self) -> T +``` + +Example: + +```rust +let mut vector = Vec::from_slice([10, 20]); +let popped_elem = vector.pop(); +assert(popped_elem == 20); +assert(vector.len() == 1); +``` + +### insert + +Inserts an element at a specified index, shifting subsequent elements to the right. + +```rust +pub fn insert(&mut self, index: Field, elem: T) +``` + +Example: + +```rust +let mut vector = Vec::from_slice([10, 30]); +vector.insert(1, 20); +assert(vector.get(1) == 20); +``` + +### remove + +Removes an element at a specified index, shifting subsequent elements to the left, and returns the removed element. + +```rust +pub fn remove(&mut self, index: Field) -> T +``` + +Example: + +```rust +let mut vector = Vec::from_slice([10, 20, 30]); +let removed_elem = vector.remove(1); +assert(removed_elem == 20); +assert(vector.len() == 2); +``` diff --git a/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx b/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx index 85706384eee..b9239f822e8 100644 --- a/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx +++ b/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx @@ -114,6 +114,19 @@ example: #include_code poseidon test_programs/execution_success/poseidon_bn254_hash/src/main.nr rust +## poseidon 2 + +Given an array of Fields, returns a new Field with the Poseidon2 Hash. Contrary to the Poseidon +function, there is only one hash and you can specify a message_size to hash only the first +`message_size` bytes of the input, + +```rust +// example for hashing the first three elements of the input +Poseidon2::hash(input, 3); +``` + +The above example for Poseidon also includes Poseidon2. + ## mimc_bn254 and mimc `mimc_bn254` is `mimc`, but with hardcoded parameters for the BN254 curve. You can use it by diff --git a/docs/docs/noir/standard_library/options.md b/docs/docs/noir/standard_library/options.md index 970c9cfbf11..a1bd4e1de5f 100644 --- a/docs/docs/noir/standard_library/options.md +++ b/docs/docs/noir/standard_library/options.md @@ -56,6 +56,10 @@ Returns the wrapped value if `self.is_some()`. Otherwise, returns the given defa Returns the wrapped value if `self.is_some()`. Otherwise, calls the given function to return a default value. +### expect + +Asserts `self.is_some()` with a provided custom message and returns the contained `Some` value. The custom message is expected to be a format string. + ### map If self is `Some(x)`, this returns `Some(f(x))`. Otherwise, this returns `None`. 
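Since the `expect` entry above has no accompanying snippet, here is a minimal sketch of how it might be used, assuming the standard `Option::some` constructor and a format string message as described:

```rust
fn main(x: Field) {
    let opt = Option::some(x);
    // `expect` asserts that `opt.is_some()` and returns the wrapped value;
    // because the message is a format string, it can reference runtime values.
    let value = opt.expect(f"expected a value for input {x}");
    assert(value == x);
}
```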
diff --git a/docs/docs/noir/standard_library/recursion.md b/docs/docs/noir/standard_library/recursion.md index f252150c8b5..9337499dac8 100644 --- a/docs/docs/noir/standard_library/recursion.md +++ b/docs/docs/noir/standard_library/recursion.md @@ -8,6 +8,26 @@ Noir supports recursively verifying proofs, meaning you verify the proof of a No Read [the explainer on recursion](../../explainers/explainer-recursion.md) to know more about this function and the [guide on how to use it.](../../how_to/how-to-recursion.md) +## The `#[recursive]` Attribute + +In Noir, the `#[recursive]` attribute is used to indicate that a circuit is designed for recursive proof generation. When applied, it informs the compiler and the tooling that the circuit should be compiled in a way that makes its proofs suitable for recursive verification. This attribute eliminates the need for manual flagging of recursion at the tooling level, streamlining the proof generation process for recursive circuits. + +### Example usage with `#[recursive]` + +```rust +#[recursive] +fn main(x: Field, y: pub Field) { + assert(x == y, "x and y are not equal"); +} + +// This marks the circuit as recursion-friendly and indicates that proofs generated from this circuit +// are intended for recursive verification. +``` + +By incorporating this attribute directly in the circuit's definition, tooling like Nargo and NoirJS can automatically execute recursive-specific duties for Noir programs (e.g. recursive-friendly proof artifact generation) without additional flags or configurations. + +## Verifying Recursive Proofs + ```rust #[foreign(verify_proof)] fn verify_proof(_verification_key : [Field], _proof : [Field], _public_input : Field, _key_hash : Field) {} diff --git a/docs/docs/reference/nargo_commands.md b/docs/docs/reference/nargo_commands.md deleted file mode 100644 index fc2671b2bfc..00000000000 --- a/docs/docs/reference/nargo_commands.md +++ /dev/null @@ -1,253 +0,0 @@ ---- -title: Nargo -description: - Noir CLI Commands for Noir Prover and Verifier to create, execute, prove and verify programs, - generate Solidity verifier smart contract and compile into JSON file containing ACIR - representation and ABI of circuit. -keywords: - [ - Nargo, - Noir CLI, - Noir Prover, - Noir Verifier, - generate Solidity verifier, - compile JSON file, - ACIR representation, - ABI of circuit, - TypeScript, - ] -sidebar_position: 0 ---- - -## General options - -| Option | Description | -| -------------------- | -------------------------------------------------- | -| `--show-ssa` | Emit debug information for the intermediate SSA IR | -| `--deny-warnings` | Quit execution when warnings are emitted | -| `--silence-warnings` | Suppress warnings | -| `-h, --help` | Print help | - -## `nargo help [subcommand]` - -Prints the list of available commands or specific information of a subcommand. - -_Arguments_ - -| Argument | Description | -| -------------- | -------------------------------------------- | -| `` | The subcommand whose help message to display | - -## `nargo backend` - -Installs and selects custom backends used to generate and verify proofs. 
- -### Commands - -| Command | Description | -| ----------- | --------------------------------------------------------- | -| `current` | Prints the name of the currently active backend | -| `ls` | Prints the list of currently installed backends | -| `use` | Select the backend to use | -| `install` | Install a new backend from a URL | -| `uninstall` | Uninstalls a backend | -| `help` | Print this message or the help of the given subcommand(s) | - -### Options - -| Option | Description | -| ------------ | ----------- | -| `-h, --help` | Print help | - -## `nargo check` - -Generate the `Prover.toml` and `Verifier.toml` files for specifying prover and verifier in/output -values of the Noir program respectively. - -### Options - -| Option | Description | -| --------------------- | ------------------------------------- | -| `--package ` | The name of the package to check | -| `--workspace` | Check all packages in the workspace | -| `--print-acir` | Display the ACIR for compiled circuit | -| `--deny-warnings` | Treat all warnings as errors | -| `--silence-warnings` | Suppress warnings | -| `-h, --help` | Print help | - -### `nargo codegen-verifier` - -Generate a Solidity verifier smart contract for the program. - -### Options - -| Option | Description | -| --------------------- | ------------------------------------- | -| `--package ` | The name of the package to codegen | -| `--workspace` | Codegen all packages in the workspace | -| `--print-acir` | Display the ACIR for compiled circuit | -| `--deny-warnings` | Treat all warnings as errors | -| `--silence-warnings` | Suppress warnings | -| `-h, --help` | Print help | - -## `nargo compile` - -Compile the program into a JSON build artifact file containing the ACIR representation and the ABI -of the circuit. This build artifact can then be used to generate and verify proofs. - -You can also use "build" as an alias for compile (e.g. `nargo build`). - -### Options - -| Option | Description | -| --------------------- | ------------------------------------------------------------ | -| `--package ` | The name of the package to compile | -| `--workspace` | Compile all packages in the workspace | -| `--print-acir` | Display the ACIR for compiled circuit | -| `--deny-warnings` | Treat all warnings as errors | -| `--silence-warnings` | Suppress warnings | -| `-h, --help` | Print help | - -## `nargo new ` - -Creates a new Noir project in a new folder. - -**Arguments** - -| Argument | Description | -| -------- | -------------------------------- | -| `` | The path to save the new project | - -### Options - -| Option | Description | -| --------------- | ----------------------------------------------------- | -| `--name ` | Name of the package [default: package directory name] | -| `--lib` | Use a library template | -| `--bin` | Use a binary template [default] | -| `--contract` | Use a contract template | -| `-h, --help` | Print help | - -## `nargo init` - -Creates a new Noir project in the current directory. - -### Options - -| Option | Description | -| --------------- | ----------------------------------------------------- | -| `--name ` | Name of the package [default: current directory name] | -| `--lib` | Use a library template | -| `--bin` | Use a binary template [default] | -| `--contract` | Use a contract template | -| `-h, --help` | Print help | - -## `nargo execute [WITNESS_NAME]` - -Runs the Noir program and prints its return value. 
- -**Arguments** - -| Argument | Description | -| ---------------- | ----------------------------------------- | -| `[WITNESS_NAME]` | Write the execution witness to named file | - -### Options - -| Option | Description | -| --------------------------------- | ------------------------------------------------------------------------------------ | -| `-p, --prover-name ` | The name of the toml file which contains the inputs for the prover [default: Prover] | -| `--package ` | The name of the package to execute | -| `--workspace` | Execute all packages in the workspace | -| `--print-acir` | Display the ACIR for compiled circuit | -| `--deny-warnings` | Treat all warnings as errors | -| `--silence-warnings` | Suppress warnings | -| `--oracle-resolver` | JSON RPC url to solve oracle calls | -| `-h, --help` | Print help | - -_Usage_ - -The inputs to the circuit are read from the `Prover.toml` file generated by `nargo check`, which -must be filled in. - -To save the witness to file, run the command with a value for the `WITNESS_NAME` argument. A -`.tr` file will then be saved in the `./target` folder. - -## `nargo prove` - -Creates a proof for the program. - -### Options - -| Option | Description | -| ------------------------------------- | ---------------------------------------------------------------------------------------- | -| `-p, --prover-name ` | The name of the toml file which contains the inputs for the prover [default: Prover] | -| `-v, --verifier-name ` | The name of the toml file which contains the inputs for the verifier [default: Verifier] | -| `--verify` | Verify proof after proving | -| `--package ` | The name of the package to prove | -| `--workspace` | Prove all packages in the workspace | -| `--print-acir` | Display the ACIR for compiled circuit | -| `--deny-warnings` | Treat all warnings as errors | -| `--silence-warnings` | Suppress warnings | -| `--oracle-resolver` | JSON RPC url to solve oracle calls | -| `-h, --help` | Print help | - -## `nargo verify` - -Given a proof and a program, verify whether the proof is valid. - -### Options - -| Option | Description | -| ------------------------------------- | ---------------------------------------------------------------------------------------- | -| `-v, --verifier-name ` | The name of the toml file which contains the inputs for the verifier [default: Verifier] | -| `--package ` | The name of the package to verify | -| `--workspace` | Verify all packages in the workspace | -| `--print-acir` | Display the ACIR for compiled circuit | -| `--deny-warnings` | Treat all warnings as errors | -| `--silence-warnings` | Suppress warnings | -| `-h, --help` | Print help | - -## `nargo test [TEST_NAME]` - -Nargo will automatically compile and run any functions which have the decorator `#[test]` on them if -you run `nargo test`. To print `println` statements in tests, use the `--show-output` flag. - -Takes an optional `--exact` flag which allows you to select tests based on an exact name. - -See an example on the [testing page](../getting_started/tooling/testing.md). 
- -### Options - -| Option | Description | -| --------------------- | -------------------------------------- | -| `--show-output` | Display output of `println` statements | -| `--exact` | Only run tests that match exactly | -| `--package ` | The name of the package to test | -| `--workspace` | Test all packages in the workspace | -| `--print-acir` | Display the ACIR for compiled circuit | -| `--deny-warnings` | Treat all warnings as errors | -| `--silence-warnings` | Suppress warnings | -| `--oracle-resolver` | JSON RPC url to solve oracle calls | -| `-h, --help` | Print help | - -## `nargo info` - -Prints a table containing the information of the package. - -Currently the table provide - -1. The number of ACIR opcodes -2. The final number gates in the circuit used by a backend - -If the file contains a contract the table will provide the -above information about each function of the contract. - -## `nargo lsp` - -Start a long-running Language Server process that communicates over stdin/stdout. -Usually this command is not run by a user, but instead will be run by a Language Client, such as [vscode-noir](https://github.com/noir-lang/vscode-noir). - -## `nargo fmt` - -Automatically formats your Noir source code based on the default formatting settings. diff --git a/docs/docs/tutorials/noirjs_app.md b/docs/docs/tutorials/noirjs_app.md index 23534795dde..ad76dd255cc 100644 --- a/docs/docs/tutorials/noirjs_app.md +++ b/docs/docs/tutorials/noirjs_app.md @@ -101,7 +101,7 @@ At this point in the tutorial, your folder structure should look like this: `npx create vite` is amazing but it creates a bunch of files we don't really need for our simple example. Actually, let's just delete everything except for `index.html`, `main.js` and `package.json`. I feel lighter already. -![my heart is ready for you, noir.js](../../static/img/memes/titanic.jpeg) +![my heart is ready for you, noir.js](@site/static/img/memes/titanic.jpeg) ## HTML @@ -270,7 +270,7 @@ if (verification) display('logs', 'Verifying proof... ✅'); You have successfully generated a client-side Noir web app! 
-![coded app without math knowledge](../../static/img/memes/flextape.jpeg) +![coded app without math knowledge](@site/static/img/memes/flextape.jpeg) ## Further Reading diff --git a/docs/docusaurus.config.ts b/docs/docusaurus.config.ts index d1d344ba635..49566c5c380 100644 --- a/docs/docusaurus.config.ts +++ b/docs/docusaurus.config.ts @@ -26,7 +26,7 @@ export default { '@docusaurus/preset-classic', { docs: { - path: "processed-docs", + path: 'processed-docs', sidebarPath: './sidebars.js', routeBasePath: '/docs', remarkPlugins: [math], @@ -38,7 +38,7 @@ export default { }, }, editUrl: ({ versionDocsDirPath, docPath }) => - `https://github.com/noir-lang/noir/edit/master/docs/${versionDocsDirPath}/${docPath}`, + `https://github.com/noir-lang/noir/edit/master/docs/${versionDocsDirPath.replace('processed-docs', 'docs')}/${docPath}`, }, blog: false, theme: { @@ -133,7 +133,7 @@ export default { // Public API key: it is safe to commit it apiKey: 'b9b94d2f1c58f7d509f0bc1f13b381fb', - + contextualSearch: true, indexName: 'noir-lang', }, }, @@ -210,6 +210,37 @@ export default { membersWithOwnFile: ['Interface', 'Class', 'TypeAlias'], }, ], + [ + 'docusaurus-plugin-typedoc', + { + id: 'noir_wasm', + entryPoints: ['../compiler/wasm/src/index.cts'], + tsconfig: '../compiler/wasm/tsconfig.json', + entryPointStrategy: 'resolve', + out: 'processed-docs/reference/NoirJS/noir_wasm', + plugin: ['typedoc-plugin-markdown'], + name: 'noir_wasm', + disableSources: true, + excludePrivate: true, + skipErrorChecking: true, + sidebar: { + filteredIds: ['reference/noir_wasm/index'], + }, + readme: 'none', + hidePageHeader: true, + hideBreadcrumbs: true, + hideInPageTOC: true, + useCodeBlocks: true, + typeDeclarationFormat: 'table', + propertiesFormat: 'table', + parametersFormat: 'table', + enumMembersFormat: 'table', + indexFormat: 'table', + outputFileStrategy: 'members', + memberPageTitle: '{name}', + membersWithOwnFile: ['Function', 'TypeAlias'], + }, + ], ], markdown: { format: 'detect', diff --git a/docs/package.json b/docs/package.json index 71b624ff565..146c2a9800c 100644 --- a/docs/package.json +++ b/docs/package.json @@ -3,7 +3,7 @@ "version": "0.0.0", "private": true, "scripts": { - "preprocess": "yarn node ./scripts/preprocess/index.js", + "preprocess": "./scripts/codegen_nargo_reference.sh && yarn node ./scripts/preprocess/index.js", "start": "yarn preprocess && docusaurus start", "build": "yarn preprocess && yarn version::stables && docusaurus build", "version::stables": "ts-node ./scripts/setStable.ts", @@ -18,7 +18,6 @@ "@noir-lang/noir_js": "workspace:*", "@noir-lang/noirc_abi": "workspace:*", "@noir-lang/types": "workspace:*", - "@signorecello/noir_playground": "^0.7.0", "axios": "^1.4.0", "clsx": "^1.2.1", "hast-util-is-element": "^1.1.0", diff --git a/docs/scripts/codegen_nargo_reference.sh b/docs/scripts/codegen_nargo_reference.sh new file mode 100755 index 00000000000..4ff7d43d142 --- /dev/null +++ b/docs/scripts/codegen_nargo_reference.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +set -eu + +cd $(dirname "$0")/.. + +REFERENCE_DIR="./processed-docs/reference" +NARGO_REFERENCE="$REFERENCE_DIR/nargo_commands.md" +rm -f $NARGO_REFERENCE +mkdir -p $REFERENCE_DIR + +echo "--- +title: Nargo +description: + Noir CLI Commands for Noir Prover and Verifier to create, execute, prove and verify programs, + generate Solidity verifier smart contract and compile into JSON file containing ACIR + representation and ABI of circuit. 
+keywords: + [ + Nargo, + Noir CLI, + Noir Prover, + Noir Verifier, + generate Solidity verifier, + compile JSON file, + ACIR representation, + ABI of circuit, + TypeScript, + ] +sidebar_position: 0 +--- +" > $NARGO_REFERENCE + +cargo run -F codegen-docs -- info >> $NARGO_REFERENCE diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css index 5a526ec5bfd..b08766fbc3b 100644 --- a/docs/src/css/custom.css +++ b/docs/src/css/custom.css @@ -209,3 +209,8 @@ html[data-theme='dark'] { border-width: 0; border-style: solid; } + +input#docsearch-input { + background-color: transparent; +} + diff --git a/docs/src/pages/index.jsx b/docs/src/pages/index.jsx index 6b52628a5ff..b372871e7b4 100644 --- a/docs/src/pages/index.jsx +++ b/docs/src/pages/index.jsx @@ -5,19 +5,7 @@ import Link from '@docusaurus/Link'; import headerPic from '@site/static/img/homepage_header_pic.png'; import { BeatLoader } from 'react-spinners'; -const NoirEditor = lazy(() => import('@signorecello/noir_playground')); - -const Spinner = () => { - return ( -

- ); -}; - export default function Landing() { - const [tryIt, setTryIt] = React.useState(false); - return (
@@ -41,65 +29,46 @@ export default function Landing() { compatible proving system. Its design choices are influenced heavily by Rust and focuses on a simple, familiar syntax.

- {!tryIt && ( -
-
- - - - - - -
-
- )} - {tryIt && ( - }> + +
+
- - - )} +
+
- {!tryIt && ( -
-
-

Learn

- - - - - - -
-
-

Coming from...

- - - - - - -
-
-

New to Everything

- - - - - - -
+
+
+

Learn

+ + + + + + +
+
+

Coming from...

+ + + + + + +
+
+

New to Everything

+ + + + + +
- )} +
diff --git a/docs/versioned_docs/version-v0.19.0/language_concepts/data_types/01_integers.md b/docs/versioned_docs/version-v0.19.0/language_concepts/data_types/01_integers.md index b1e7ad11bfd..1814365800a 100644 --- a/docs/versioned_docs/version-v0.19.0/language_concepts/data_types/01_integers.md +++ b/docs/versioned_docs/version-v0.19.0/language_concepts/data_types/01_integers.md @@ -107,6 +107,6 @@ Example of how it is used: use dep::std; fn main(x: u8, y: u8) -> pub u8 { - std::wrapping_add(x + y) + std::wrapping_add(x, y) } ``` diff --git a/docs/versioned_docs/version-v0.19.1/language_concepts/data_types/01_integers.md b/docs/versioned_docs/version-v0.19.1/language_concepts/data_types/01_integers.md index b1e7ad11bfd..1814365800a 100644 --- a/docs/versioned_docs/version-v0.19.1/language_concepts/data_types/01_integers.md +++ b/docs/versioned_docs/version-v0.19.1/language_concepts/data_types/01_integers.md @@ -107,6 +107,6 @@ Example of how it is used: use dep::std; fn main(x: u8, y: u8) -> pub u8 { - std::wrapping_add(x + y) + std::wrapping_add(x, y) } ``` diff --git a/docs/versioned_docs/version-v0.19.2/language_concepts/data_types/01_integers.md b/docs/versioned_docs/version-v0.19.2/language_concepts/data_types/01_integers.md index b1e7ad11bfd..1814365800a 100644 --- a/docs/versioned_docs/version-v0.19.2/language_concepts/data_types/01_integers.md +++ b/docs/versioned_docs/version-v0.19.2/language_concepts/data_types/01_integers.md @@ -107,6 +107,6 @@ Example of how it is used: use dep::std; fn main(x: u8, y: u8) -> pub u8 { - std::wrapping_add(x + y) + std::wrapping_add(x, y) } ``` diff --git a/docs/versioned_docs/version-v0.19.3/language_concepts/data_types/01_integers.md b/docs/versioned_docs/version-v0.19.3/language_concepts/data_types/01_integers.md index b1e7ad11bfd..1814365800a 100644 --- a/docs/versioned_docs/version-v0.19.3/language_concepts/data_types/01_integers.md +++ b/docs/versioned_docs/version-v0.19.3/language_concepts/data_types/01_integers.md @@ -107,6 +107,6 @@ Example of how it is used: use dep::std; fn main(x: u8, y: u8) -> pub u8 { - std::wrapping_add(x + y) + std::wrapping_add(x, y) } ``` diff --git a/docs/versioned_docs/version-v0.22.0/how_to/solidity_verifier.md b/docs/versioned_docs/version-v0.22.0/how_to/solidity_verifier.md index 8022b0e5f20..6aaad542ee0 100644 --- a/docs/versioned_docs/version-v0.22.0/how_to/solidity_verifier.md +++ b/docs/versioned_docs/version-v0.22.0/how_to/solidity_verifier.md @@ -120,6 +120,7 @@ You can currently deploy the Solidity verifier contracts to most EVM compatible - Polygon PoS - Scroll - Celo +- Taiko Other EVM chains should work, but have not been tested directly by our team. If you test any other chains, please open a PR on this page to update the list. See [this doc](https://github.com/noir-lang/noir-starter/tree/main/with-foundry#testing-on-chain) for more info about testing verifier contracts on different EVM chains. diff --git a/docs/versioned_docs/version-v0.22.0/migration_notes.md b/docs/versioned_docs/version-v0.22.0/migration_notes.md index 184ca283539..9c1809fd609 100644 --- a/docs/versioned_docs/version-v0.22.0/migration_notes.md +++ b/docs/versioned_docs/version-v0.22.0/migration_notes.md @@ -6,6 +6,20 @@ keywords: [Noir, notes, migration, updating, upgrading] Noir is in full-speed development. Things break fast, wild, and often. This page attempts to leave some notes on errors you might encounter when upgrading and how to resolve them until proper patches are built. 
+### `backend encountered an error: libc++.so.1` + +Depending on your OS, you may encounter the following error when running `nargo prove` for the first time: + +```text +The backend encountered an error: "/home/codespace/.nargo/backends/acvm-backend-barretenberg/backend_binary: error while loading shared libraries: libc++.so.1: cannot open shared object file: No such file or directory\n" +``` + +Install the `libc++-dev` library with: + +```bash +sudo apt install libc++-dev +``` + ## ≥0.19 ### Enforcing `compiler_version` diff --git a/docs/versioned_docs/version-v0.22.0/noir/syntax/data_types/integers.md b/docs/versioned_docs/version-v0.22.0/noir/syntax/data_types/integers.md index 7d1e83cf4e9..baf5f4742c4 100644 --- a/docs/versioned_docs/version-v0.22.0/noir/syntax/data_types/integers.md +++ b/docs/versioned_docs/version-v0.22.0/noir/syntax/data_types/integers.md @@ -108,6 +108,6 @@ Example of how it is used: use dep::std; fn main(x: u8, y: u8) -> pub u8 { - std::wrapping_add(x + y) + std::wrapping_add(x, y) } ``` diff --git a/docs/versioned_docs/version-v0.23.0/getting_started/installation/other_install_methods.md b/docs/versioned_docs/version-v0.23.0/getting_started/installation/other_install_methods.md index a532f83750e..746633b628d 100644 --- a/docs/versioned_docs/version-v0.23.0/getting_started/installation/other_install_methods.md +++ b/docs/versioned_docs/version-v0.23.0/getting_started/installation/other_install_methods.md @@ -48,7 +48,7 @@ Paste and run the following in the terminal to extract and install the binary: ```bash mkdir -p $HOME/.nargo/bin && \ -curl -o $HOME/.nargo/bin/nargo-aarch64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.6.0/nargo-aarch64-apple-darwin.tar.gz && \ +curl -o $HOME/.nargo/bin/nargo-aarch64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.23.0/nargo-aarch64-apple-darwin.tar.gz && \ tar -xvf $HOME/.nargo/bin/nargo-aarch64-apple-darwin.tar.gz -C $HOME/.nargo/bin/ && \ echo '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.zshrc && \ source ~/.zshrc @@ -58,7 +58,7 @@ source ~/.zshrc ```bash mkdir -p $HOME/.nargo/bin && \ -curl -o $HOME/.nargo/bin/nargo-x86_64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.6.0/nargo-x86_64-apple-darwin.tar.gz && \ +curl -o $HOME/.nargo/bin/nargo-x86_64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.23.0/nargo-x86_64-apple-darwin.tar.gz && \ tar -xvf $HOME/.nargo/bin/nargo-x86_64-apple-darwin.tar.gz -C $HOME/.nargo/bin/ && \ echo '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.zshrc && \ source ~/.zshrc @@ -68,7 +68,7 @@ source ~/.zshrc ```bash mkdir -p $HOME/.nargo/bin && \ -curl -o $HOME/.nargo/bin/nargo-x86_64-unknown-linux-gnu.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.6.0/nargo-x86_64-unknown-linux-gnu.tar.gz && \ +curl -o $HOME/.nargo/bin/nargo-x86_64-unknown-linux-gnu.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.23.0/nargo-x86_64-unknown-linux-gnu.tar.gz && \ tar -xvf $HOME/.nargo/bin/nargo-x86_64-unknown-linux-gnu.tar.gz -C $HOME/.nargo/bin/ && \ echo -e '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.bashrc && \ source ~/.bashrc diff --git a/docs/versioned_docs/version-v0.23.0/index.mdx b/docs/versioned_docs/version-v0.23.0/index.mdx index 2cec2397051..75086ddcdde 100644 --- a/docs/versioned_docs/version-v0.23.0/index.mdx +++ b/docs/versioned_docs/version-v0.23.0/index.mdx @@ -34,7 +34,7 @@ Noir works differently from most ZK languages by taking a two-pronged 
path. Firs :::info -Noir is backend agnostic, which means it makes no assumptions on which proving backend powers the ZK proof. Being the language that powers [Aztec Contracts](https://docs.aztec.network/dev_docs/contracts/main), it defaults to Aztec's Barretenberg proving backend. +Noir is backend agnostic, which means it makes no assumptions on which proving backend powers the ZK proof. Being the language that powers [Aztec Contracts](https://docs.aztec.network/developers/contracts/main), it defaults to Aztec's Barretenberg proving backend. However, the ACIR output can be transformed to be compatible with other PLONK-based backends, or into a [rank-1 constraint system](https://www.rareskills.io/post/rank-1-constraint-system) suitable for backends such as Arkwork's Marlin. @@ -48,7 +48,7 @@ Noir can be used both in complex cloud-based backends and in user's smartphones, Noir Logo - Aztec Contracts leverage Noir to allow for the storage and execution of private information. Writing an Aztec Contract is as easy as writing Noir, and Aztec developers can easily interact with the network storage and execution through the [Aztec.nr](https://docs.aztec.network/dev_docs/contracts/main) library. + Aztec Contracts leverage Noir to allow for the storage and execution of private information. Writing an Aztec Contract is as easy as writing Noir, and Aztec developers can easily interact with the network storage and execution through the [Aztec.nr](https://docs.aztec.network/developers/contracts/main) library. Soliditry Verifier Example diff --git a/docs/versioned_docs/version-v0.23.0/migration_notes.md b/docs/versioned_docs/version-v0.23.0/migration_notes.md index 9f27230a1a0..68acd5e566e 100644 --- a/docs/versioned_docs/version-v0.23.0/migration_notes.md +++ b/docs/versioned_docs/version-v0.23.0/migration_notes.md @@ -6,6 +6,22 @@ keywords: [Noir, notes, migration, updating, upgrading] Noir is in full-speed development. Things break fast, wild, and often. This page attempts to leave some notes on errors you might encounter when upgrading and how to resolve them until proper patches are built. +## 0.22.0 + +### `backend encountered an error: libc++.so.1` + +Depending on your OS, you may encounter the following error when running `nargo prove` for the first time: + +```text +The backend encountered an error: "/home/codespace/.nargo/backends/acvm-backend-barretenberg/backend_binary: error while loading shared libraries: libc++.so.1: cannot open shared object file: No such file or directory\n" +``` + +Install the `libc++-dev` library with: + +```bash +sudo apt install libc++-dev +``` + ## ≥0.19 ### Enforcing `compiler_version` diff --git a/docs/versioned_docs/version-v0.23.0/noir/concepts/data_types/fields.md b/docs/versioned_docs/version-v0.23.0/noir/concepts/data_types/fields.md index a1c67945d66..99b4aa63549 100644 --- a/docs/versioned_docs/version-v0.23.0/noir/concepts/data_types/fields.md +++ b/docs/versioned_docs/version-v0.23.0/noir/concepts/data_types/fields.md @@ -157,6 +157,23 @@ fn main() { } ``` +### assert_max_bit_size + +Adds a constraint to specify that the field can be represented with `bit_size` number of bits + +```rust +fn assert_max_bit_size(self, bit_size: u32) +``` + +example: + +```rust +fn main() { + let field = 2 + field.assert_max_bit_size(32); +} +``` + ### sgn0 Parity of (prime) Field element, i.e. sgn0(x mod p) = 0 if x ∈ \{0, ..., p-1\} is even, otherwise sgn0(x mod p) = 1. @@ -164,3 +181,12 @@ Parity of (prime) Field element, i.e. 
sgn0(x mod p) = 0 if x ∈ \{0, ..., p-1\} ```rust fn sgn0(self) -> u1 ``` + + +### lt + +Returns true if the field is less than the other field + +```rust +pub fn lt(self, another: Field) -> bool +``` diff --git a/docs/versioned_docs/version-v0.23.0/noir/concepts/data_types/integers.md b/docs/versioned_docs/version-v0.23.0/noir/concepts/data_types/integers.md index 7d1e83cf4e9..30135d76e4a 100644 --- a/docs/versioned_docs/version-v0.23.0/noir/concepts/data_types/integers.md +++ b/docs/versioned_docs/version-v0.23.0/noir/concepts/data_types/integers.md @@ -51,6 +51,55 @@ If you are using the default proving backend with Noir, both even (e.g. _u2_, _i ::: + +## 128 bits Unsigned Integers + +The built-in structure `U128` allows you to use 128-bit unsigned integers almost like a native integer type. However, there are some differences to keep in mind: +- You cannot cast between a native integer and `U128` +- There is a higher performance cost when using `U128`, compared to a native type. + +Conversion between unsigned integer types and U128 are done through the use of `from_integer` and `to_integer` functions. + +```rust +fn main() { + let x = U128::from_integer(23); + let y = U128::from_hex("0x7"); + let z = x + y; + assert(z.to_integer() == 30); +} +``` + +`U128` is implemented with two 64 bits limbs, representing the low and high bits, which explains the performance cost. You should expect `U128` to be twice more costly for addition and four times more costly for multiplication. +You can construct a U128 from its limbs: +```rust +fn main(x: u64, y: u64) { + let x = U128::from_u64s_be(x,y); + assert(z.hi == x as Field); + assert(z.lo == y as Field); +} +``` + +Note that the limbs are stored as Field elements in order to avoid unnecessary conversions. +Apart from this, most operations will work as usual: + +```rust +fn main(x: U128, y: U128) { + // multiplication + let c = x * y; + // addition and subtraction + let c = c - x + y; + // division + let c = x / y; + // bit operation; + let c = x & y | y; + // bit shift + let c = x << y; + // comparisons; + let c = x < y; + let c = x == y; +} +``` + ## Overflows Computations that exceed the type boundaries will result in overflow errors. This happens with both signed and unsigned integers. For example, attempting to prove: @@ -108,6 +157,6 @@ Example of how it is used: use dep::std; fn main(x: u8, y: u8) -> pub u8 { - std::wrapping_add(x + y) + std::wrapping_add(x, y) } ``` diff --git a/docs/versioned_docs/version-v0.23.0/noir/standard_library/black_box_fns.md b/docs/versioned_docs/version-v0.23.0/noir/standard_library/black_box_fns.md index 4b1efbd17de..eae8744abf0 100644 --- a/docs/versioned_docs/version-v0.23.0/noir/standard_library/black_box_fns.md +++ b/docs/versioned_docs/version-v0.23.0/noir/standard_library/black_box_fns.md @@ -6,40 +6,26 @@ keywords: [noir, black box functions] Black box functions are functions in Noir that rely on backends implementing support for specialized constraints. This makes certain zk-snark unfriendly computations cheaper than if they were implemented in Noir. -:::warning - -It is likely that not all backends will support a particular black box function. - -::: - -Because it is not guaranteed that all backends will support black box functions, it is possible that certain Noir programs won't compile against a particular backend if they use an unsupported black box function. It is possible to fallback to less efficient implementations written in Noir/ACIR in some cases. 
- -Black box functions are specified with the `#[foreign(black_box_fn)]` attribute. For example, the SHA256 function in the Noir [source code](https://github.com/noir-lang/noir/blob/v0.5.1/noir_stdlib/src/hash.nr) looks like: - -```rust -#[foreign(sha256)] -fn sha256(_input : [u8; N]) -> [u8; 32] {} -``` +The ACVM spec defines a set of blackbox functions which backends will be expected to implement. This allows backends to use optimized implementations of these constraints if they have them, however they may also fallback to less efficient naive implementations if not. ## Function list -Here is a list of the current black box functions that are supported by UltraPlonk: +Here is a list of the current black box functions: -- AES - [SHA256](./cryptographic_primitives/hashes#sha256) - [Schnorr signature verification](./cryptographic_primitives/schnorr) - [Blake2s](./cryptographic_primitives/hashes#blake2s) +- [Blake3](./cryptographic_primitives/hashes#blake3) - [Pedersen Hash](./cryptographic_primitives/hashes#pedersen_hash) - [Pedersen Commitment](./cryptographic_primitives/hashes#pedersen_commitment) - [ECDSA signature verification](./cryptographic_primitives/ecdsa_sig_verification) - [Fixed base scalar multiplication](./cryptographic_primitives/scalar) -- [Compute merkle root](./merkle_trees#compute_merkle_root) - AND - XOR - RANGE - [Keccak256](./cryptographic_primitives/hashes#keccak256) - [Recursive proof verification](./recursion) -Most black box functions are included as part of the Noir standard library, however `AND`, `XOR` and `RANGE` are used as part of the Noir language syntax. For instance, using the bitwise operator `&` will invoke the `AND` black box function. To ensure compatibility across backends, the ACVM has fallback implementations of `AND`, `XOR` and `RANGE` defined in its standard library which it can seamlessly fallback to if the backend doesn't support them. +Most black box functions are included as part of the Noir standard library, however `AND`, `XOR` and `RANGE` are used as part of the Noir language syntax. For instance, using the bitwise operator `&` will invoke the `AND` black box function. You can view the black box functions defined in the ACVM code [here](https://github.com/noir-lang/noir/blob/master/acvm-repo/acir/src/circuit/black_box_functions.rs). diff --git a/docs/versioned_docs/version-v0.23.0/noir/standard_library/bn254.md b/docs/versioned_docs/version-v0.23.0/noir/standard_library/bn254.md new file mode 100644 index 00000000000..3294f005dbb --- /dev/null +++ b/docs/versioned_docs/version-v0.23.0/noir/standard_library/bn254.md @@ -0,0 +1,46 @@ +--- +title: Bn254 Field Library +--- + +Noir provides a module in standard library with some optimized functions for bn254 Fr in `std::field::bn254`. + +## decompose + +```rust +fn decompose(x: Field) -> (Field, Field) {} +``` + +Decomposes a single field into two fields, low and high. The low field contains the lower 16 bytes of the input field and the high field contains the upper 16 bytes of the input field. Both field results are range checked to 128 bits. + + +## assert_gt + +```rust +fn assert_gt(a: Field, b: Field) {} +``` + +Asserts that a > b. This will generate less constraints than using `assert(gt(a, b))`. + +## assert_lt + +```rust +fn assert_lt(a: Field, b: Field) {} +``` + +Asserts that a < b. This will generate less constraints than using `assert(lt(a, b))`. + +## gt + +```rust +fn gt(a: Field, b: Field) -> bool {} +``` + +Returns true if a > b. 
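+
+A short usage sketch combining the helpers above (`decompose`, `assert_gt`, `gt`). This is illustrative only: the variable names are hypothetical, and it assumes `decompose` returns its halves in `(low, high)` order as described.
+
+```rust
+use dep::std;
+
+// Illustrative sketch: assumes the prover supplies inputs with a > b.
+fn main(a: Field, b: Field) {
+    // Split `a` into its lower and upper 16-byte halves; both are range checked to 128 bits.
+    let (low, high) = std::field::bn254::decompose(a);
+    // Recombining the halves with 2^128 recovers the original element.
+    let two_pow_128: Field = 0x100000000000000000000000000000000;
+    assert(low + high * two_pow_128 == a);
+
+    // Cheapest way to enforce a > b:
+    std::field::bn254::assert_gt(a, b);
+    // Equivalent, but more costly, since `gt` first materializes a boolean witness:
+    assert(std::field::bn254::gt(a, b));
+}
+```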
+ +## lt + +```rust +fn lt(a: Field, b: Field) -> bool {} +``` + +Returns true if a < b. \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx b/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx index 1376c51dfde..4bf09cef178 100644 --- a/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx +++ b/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx @@ -13,9 +13,16 @@ Noir supports ECDSA signatures verification over the secp256k1 and secp256r1 cur Verifier for ECDSA Secp256k1 signatures -```rust -fn verify_signature(_public_key_x : [u8; 32], _public_key_y : [u8; 32], _signature: [u8; 64], _message: [u8]) -> bool +```rust title="ecdsa_secp256k1" showLineNumbers +pub fn verify_signature( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8; N] +) -> bool ``` +>
Source code: noir_stdlib/src/ecdsa_secp256k1.nr#L2-L9 + example: @@ -30,9 +37,16 @@ fn main(hashed_message : [u8;32], pub_key_x : [u8;32], pub_key_y : [u8;32], sign Verifier for ECDSA Secp256r1 signatures -```rust -fn verify_signature(_public_key_x : [u8; 32], _public_key_y : [u8; 32], _signature: [u8; 64], _message: [u8]) -> bool +```rust title="ecdsa_secp256r1" showLineNumbers +pub fn verify_signature( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8; N] +) -> bool ``` +> Source code: noir_stdlib/src/ecdsa_secp256r1.nr#L2-L9 + example: diff --git a/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/hashes.mdx b/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/hashes.mdx index 3c5f7f79603..730b6d4117f 100644 --- a/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/hashes.mdx +++ b/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/hashes.mdx @@ -14,9 +14,11 @@ import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; Given an array of bytes, returns the resulting sha256 hash. -```rust -fn sha256(_input : [u8]) -> [u8; 32] +```rust title="sha256" showLineNumbers +pub fn sha256(input: [u8; N]) -> [u8; 32] ``` +> Source code: noir_stdlib/src/hash.nr#L5-L7 + example: @@ -33,9 +35,11 @@ fn main() { Given an array of bytes, returns an array with the Blake2 hash -```rust -fn blake2s(_input : [u8]) -> [u8; 32] +```rust title="blake2s" showLineNumbers +pub fn blake2s(input: [u8; N]) -> [u8; 32] ``` +> Source code: noir_stdlib/src/hash.nr#L11-L13 + example: @@ -48,43 +52,81 @@ fn main() { -## pedersen_hash +## blake3 -Given an array of Fields, returns the Pedersen hash. +Given an array of bytes, returns an array with the Blake3 hash -```rust -fn pedersen_hash(_input : [Field]) -> Field +```rust title="blake3" showLineNumbers +pub fn blake3(input: [u8; N]) -> [u8; 32] ``` +> Source code: noir_stdlib/src/hash.nr#L17-L19 + example: ```rust fn main() { let x = [163, 117, 178, 149]; // some random bytes - let hash = std::hash::pedersen_hash(x); + let hash = std::hash::blake3(x); } ``` +## pedersen_hash + +Given an array of Fields, returns the Pedersen hash. + +```rust title="pedersen_hash" showLineNumbers +pub fn pedersen_hash(input: [Field; N]) -> Field +``` +> Source code: noir_stdlib/src/hash.nr#L42-L44 + + +example: + +```rust title="pedersen-hash" showLineNumbers +use dep::std; + +fn main(x: Field, y: Field, expected_hash: Field) { + let hash = std::hash::pedersen_hash([x, y]); + assert_eq(hash, expected_hash); +} +``` +> Source code: test_programs/execution_success/pedersen_hash/src/main.nr#L1-L8 + + + ## pedersen_commitment Given an array of Fields, returns the Pedersen commitment. 
-```rust -fn pedersen_commitment(_input : [Field]) -> [Field; 2] +```rust title="pedersen_commitment" showLineNumbers +struct PedersenPoint { + x : Field, + y : Field, +} + +pub fn pedersen_commitment(input: [Field; N]) -> PedersenPoint ``` +> Source code: noir_stdlib/src/hash.nr#L22-L29 + example: -```rust -fn main() { - let x = [163, 117, 178, 149]; // some random bytes - let commitment = std::hash::pedersen_commitment(x); +```rust title="pedersen-commitment" showLineNumbers +use dep::std; + +fn main(x: Field, y: Field, expected_commitment: std::hash::PedersenPoint) { + let commitment = std::hash::pedersen_commitment([x, y]); + assert_eq(commitment.x, expected_commitment.x); + assert_eq(commitment.y, expected_commitment.y); } ``` +> Source code: test_programs/execution_success/pedersen_commitment/src/main.nr#L1-L9 + @@ -94,19 +136,38 @@ Given an array of bytes (`u8`), returns the resulting keccak hash as an array of (`[u8; 32]`). Specify a message_size to hash only the first `message_size` bytes of the input. -```rust -fn keccak256(_input : [u8; N], _message_size: u32) -> [u8; 32] +```rust title="keccak256" showLineNumbers +pub fn keccak256(input: [u8; N], message_size: u32) -> [u8; 32] ``` +> Source code: noir_stdlib/src/hash.nr#L67-L69 + example: -```rust -fn main() { - let x = [163, 117, 178, 149]; // some random bytes +```rust title="keccak256" showLineNumbers +use dep::std; + +fn main(x: Field, result: [u8; 32]) { + // We use the `as` keyword here to denote the fact that we want to take just the first byte from the x Field + // The padding is taken care of by the program + let digest = std::hash::keccak256([x as u8], 1); + assert(digest == result); + + //#1399: variable message size let message_size = 4; - let hash = std::hash::keccak256(x, message_size); + let hash_a = std::hash::keccak256([1, 2, 3, 4], message_size); + let hash_b = std::hash::keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size); + + assert(hash_a == hash_b); + + let message_size_big = 8; + let hash_c = std::hash::keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size_big); + + assert(hash_a != hash_c); } ``` +> Source code: test_programs/execution_success/keccak256/src/main.nr#L1-L22 + @@ -122,13 +183,19 @@ fn hash_1(input: [Field; 1]) -> Field example: -```rust -fn main() -{ - let hash_2 = std::hash::poseidon::bn254::hash_2([1, 2]); - assert(hash2 == 0x115cc0f5e7d690413df64c6b9662e9cf2a3617f2743245519e19607a4417189a); +```rust title="poseidon" showLineNumbers +use dep::std::hash::poseidon; + +fn main(x1: [Field; 2], y1: pub Field, x2: [Field; 4], y2: pub Field) { + let hash1 = poseidon::bn254::hash_2(x1); + assert(hash1 == y1); + + let hash2 = poseidon::bn254::hash_4(x2); + assert(hash2 == y2); } ``` +> Source code: test_programs/execution_success/poseidon_bn254_hash/src/main.nr#L1-L11 + ## mimc_bn254 and mimc diff --git a/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/scalar.mdx b/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/scalar.mdx index aa4fb8cbaed..df411ca5443 100644 --- a/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/scalar.mdx +++ b/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/scalar.mdx @@ -12,9 +12,14 @@ import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; Performs scalar multiplication over the embedded curve whose coordinates are defined by the configured noir field. For the BN254 scalar field, this is BabyJubJub or Grumpkin. 
-```rust -fn fixed_base_embedded_curve(_input : Field) -> [Field; 2] +```rust title="fixed_base_embedded_curve" showLineNumbers +pub fn fixed_base_embedded_curve( + low: Field, + high: Field +) -> [Field; 2] ``` +> Source code: noir_stdlib/src/scalar_mul.nr#L27-L32 + example diff --git a/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/schnorr.mdx b/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/schnorr.mdx index 7a2c9c20226..ae12e6c12dc 100644 --- a/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/schnorr.mdx +++ b/docs/versioned_docs/version-v0.23.0/noir/standard_library/cryptographic_primitives/schnorr.mdx @@ -11,9 +11,16 @@ import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; Verifier for Schnorr signatures over the embedded curve (for BN254 it is Grumpkin). -```rust -fn verify_signature(_public_key_x: Field, _public_key_y: Field, _signature: [u8; 64], _message: [u8]) -> bool +```rust title="schnorr_verify" showLineNumbers +pub fn verify_signature( + public_key_x: Field, + public_key_y: Field, + signature: [u8; 64], + message: [u8; N] +) -> bool ``` +> Source code: noir_stdlib/src/schnorr.nr#L2-L9 + where `_signature` can be generated like so using the npm package [@noir-lang/barretenberg](https://www.npmjs.com/package/@noir-lang/barretenberg) diff --git a/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/.nojekyll b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/.nojekyll new file mode 100644 index 00000000000..e2ac6616add --- /dev/null +++ b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/.nojekyll @@ -0,0 +1 @@ +TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false. \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/functions/compile.md b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/functions/compile.md new file mode 100644 index 00000000000..33eb434c3db --- /dev/null +++ b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/functions/compile.md @@ -0,0 +1,51 @@ +# compile() + +```ts +compile( + fileManager, + projectPath?, + logFn?, +debugLogFn?): Promise +``` + +Compiles a Noir project + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `fileManager` | `FileManager` | The file manager to use | +| `projectPath`? | `string` | The path to the project inside the file manager. Defaults to the root of the file manager | +| `logFn`? | `LogFn` | A logging function. If not provided, console.log will be used | +| `debugLogFn`? | `LogFn` | A debug logging function. 
If not provided, logFn will be used | + +## Returns + +`Promise`\<[`CompilationResult`](../type-aliases/CompilationResult.md)\> + +## Example + +```typescript +// Node.js + +import { compile, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager(myProjectPath); +const myCompiledCode = await compile(fm); +``` + +```typescript +// Browser + +import { compile, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager('/'); +for (const path of files) { + await fm.writeFile(path, await getFileAsStream(path)); +} +const myCompiledCode = await compile(fm); +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/functions/createFileManager.md b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/functions/createFileManager.md new file mode 100644 index 00000000000..7e65c1d69c7 --- /dev/null +++ b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/functions/createFileManager.md @@ -0,0 +1,21 @@ +# createFileManager() + +```ts +createFileManager(dataDir): FileManager +``` + +Creates a new FileManager instance based on fs in node and memfs in the browser (via webpack alias) + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `dataDir` | `string` | root of the file system | + +## Returns + +`FileManager` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/functions/inflateDebugSymbols.md b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/functions/inflateDebugSymbols.md new file mode 100644 index 00000000000..fcea9275341 --- /dev/null +++ b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/functions/inflateDebugSymbols.md @@ -0,0 +1,21 @@ +# inflateDebugSymbols() + +```ts +inflateDebugSymbols(debugSymbols): any +``` + +Decompresses and decodes the debug symbols + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `debugSymbols` | `string` | The base64 encoded debug symbols | + +## Returns + +`any` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/index.md b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/index.md new file mode 100644 index 00000000000..939f2481687 --- /dev/null +++ b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/index.md @@ -0,0 +1,21 @@ +# noir_wasm + +## Exports + +### Type Aliases + +| Type alias | Description | +| :------ | :------ | +| [CompilationResult](type-aliases/CompilationResult.md) | output of Noir Wasm compilation, can be for a contract or lib/binary | + +### Functions + +| Function | Description | +| :------ | :------ | +| [compile](functions/compile.md) | Compiles a Noir project | +| [createFileManager](functions/createFileManager.md) | Creates a new FileManager instance based on fs in node and memfs in the browser (via webpack alias) | +| [inflateDebugSymbols](functions/inflateDebugSymbols.md) | Decompresses and decodes the debug symbols | + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and 
[TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/type-aliases/CompilationResult.md b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/type-aliases/CompilationResult.md new file mode 100644 index 00000000000..23cfbe6025d --- /dev/null +++ b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/type-aliases/CompilationResult.md @@ -0,0 +1,11 @@ +# CompilationResult + +```ts +type CompilationResult: ContractCompilationArtifacts | ProgramCompilationArtifacts; +``` + +output of Noir Wasm compilation, can be for a contract or lib/binary + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/typedoc-sidebar.cjs b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/typedoc-sidebar.cjs new file mode 100644 index 00000000000..d7eba0db813 --- /dev/null +++ b/docs/versioned_docs/version-v0.23.0/reference/NoirJS/noir_wasm/typedoc-sidebar.cjs @@ -0,0 +1,4 @@ +// @ts-check +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const typedocSidebar = { items: [{"type":"doc","id":"reference/NoirJS/noir_wasm/index","label":"API"},{"type":"category","label":"Type Aliases","items":[{"type":"doc","id":"reference/NoirJS/noir_wasm/type-aliases/CompilationResult","label":"CompilationResult"}]},{"type":"category","label":"Functions","items":[{"type":"doc","id":"reference/NoirJS/noir_wasm/functions/compile","label":"compile"},{"type":"doc","id":"reference/NoirJS/noir_wasm/functions/createFileManager","label":"createFileManager"},{"type":"doc","id":"reference/NoirJS/noir_wasm/functions/inflateDebugSymbols","label":"inflateDebugSymbols"}]}]}; +module.exports = typedocSidebar.items; \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.23.0/tutorials/noirjs_app.md b/docs/versioned_docs/version-v0.23.0/tutorials/noirjs_app.md index 23534795dde..82899217e61 100644 --- a/docs/versioned_docs/version-v0.23.0/tutorials/noirjs_app.md +++ b/docs/versioned_docs/version-v0.23.0/tutorials/noirjs_app.md @@ -14,9 +14,9 @@ You can find the complete app code for this guide [here](https://github.com/noir :::note -Feel free to use whatever versions, just keep in mind that Nargo and the NoirJS packages are meant to be in sync. For example, Nargo 0.19.x matches `noir_js@0.19.x`, etc. +Feel free to use whatever versions, just keep in mind that Nargo and the NoirJS packages are meant to be in sync. For example, Nargo 0.23.x matches `noir_js@0.23.x`, etc. -In this guide, we will be pinned to 0.19.4. +In this guide, we will be pinned to 0.23.0. ::: @@ -80,7 +80,7 @@ To do this this, go back to the previous folder (`cd ..`) and create a new vite You should see `vite-project` appear in your root folder. 
This seems like a good time to `cd` into it and install our NoirJS packages: ```bash -npm i @noir-lang/backend_barretenberg@0.19.4 @noir-lang/noir_js@0.19.4 +npm i @noir-lang/backend_barretenberg@0.23.0 @noir-lang/noir_js@0.23.0 vite-plugin-top-level-await ``` :::info @@ -99,9 +99,25 @@ At this point in the tutorial, your folder structure should look like this: #### Some cleanup +Add a `vite.config.js` file containing the following: + +```js +import { defineConfig } from 'vite'; +import topLevelAwait from "vite-plugin-top-level-await"; + +export default defineConfig({ + plugins: [ + topLevelAwait({ + promiseExportName: "__tla", + promiseImportName: i => `__tla_${i}` + }) + ] +}) +``` + `npx create vite` is amazing but it creates a bunch of files we don't really need for our simple example. Actually, let's just delete everything except for `index.html`, `main.js` and `package.json`. I feel lighter already. -![my heart is ready for you, noir.js](../../static/img/memes/titanic.jpeg) +![my heart is ready for you, noir.js](@site/static/img/memes/titanic.jpeg) ## HTML @@ -270,7 +286,7 @@ if (verification) display('logs', 'Verifying proof... ✅'); You have successfully generated a client-side Noir web app! -![coded app without math knowledge](../../static/img/memes/flextape.jpeg) +![coded app without math knowledge](@site/static/img/memes/flextape.jpeg) ## Further Reading diff --git a/docs/versioned_docs/version-v0.24.0/explainers/explainer-oracle.md b/docs/versioned_docs/version-v0.24.0/explainers/explainer-oracle.md new file mode 100644 index 00000000000..b84ca5dd986 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/explainers/explainer-oracle.md @@ -0,0 +1,57 @@ +--- +title: Oracles +description: This guide provides an in-depth understanding of how Oracles work in Noir programming. Learn how to use outside calculations in your programs, constrain oracles, and understand their uses and limitations. +keywords: + - Noir Programming + - Oracles + - JSON-RPC + - Foreign Call Handlers + - Constrained Functions + - Blockchain Programming +sidebar_position: 1 +--- + +If you've seen "The Matrix" you may recall "The Oracle" as Gloria Foster smoking cigarettes and baking cookies. While she appears to "know things", she is actually providing a calculation of a pre-determined future. Noir Oracles are similar, in a way. They don't calculate the future (yet), but they allow you to use outside calculations in your programs. + +![matrix oracle prediction](@site/static/img/memes/matrix_oracle.jpeg) + +A Noir program is usually self-contained. You can pass certain inputs to it, and it will generate a deterministic output for those inputs. But what if you wanted to defer some calculation to an outside process or source? + +Oracles are functions that provide this feature. + +## Use cases + +An example usage for Oracles is proving something on-chain. For example, proving that the ETH-USDC quote was below a certain target at a certain block time. Or even making more complex proofs like proving the ownership of an NFT as an anonymous login method. + +Another interesting use case is to defer expensive calculations to be made outside of the Noir program, and then constraining the result; similar to the use of [unconstrained functions](../noir/concepts//unconstrained.md). + +In short, anything that can be constrained in a Noir program but needs to be fetched from an external source is a great candidate to be used in oracles. + +## Constraining oracles + +Just like in The Matrix, Oracles are powerful. 
But with great power, comes great responsibility. Just because you're using them in a Noir program doesn't mean they're true. Noir has no superpowers. If you want to prove that Portugal won the Euro Cup 2016, you're still relying on potentially untrusted information. + +To give a concrete example, Alice wants to login to the [NounsDAO](https://nouns.wtf/) forum with her username "noir_nouner" by proving she owns a noun without revealing her ethereum address. Her Noir program could have a oracle call like this: + +```rust +#[oracle(getNoun)] +unconstrained fn get_noun(address: Field) -> Field +``` + +This oracle could naively resolve with the number of Nouns she possesses. However, it is useless as a trusted source, as the oracle could resolve to anything Alice wants. In order to make this oracle call actually useful, Alice would need to constrain the response from the oracle, by proving her address and the noun count belongs to the state tree of the contract. + +In short, **Oracles don't prove anything. Your Noir program does.** + +:::danger + +If you don't constrain the return of your oracle, you could be clearly opening an attack vector on your Noir program. Make double-triple sure that the return of an oracle call is constrained! + +::: + +## How to use Oracles + +On CLI, Nargo resolves oracles by making JSON RPC calls, which means it would require an RPC node to be running. + +In JavaScript, NoirJS accepts and resolves arbitrary call handlers (that is, not limited to JSON) as long as they matches the expected types the developer defines. Refer to [Foreign Call Handler](../reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md) to learn more about NoirJS's call handling. + +If you want to build using oracles, follow through to the [oracle guide](../how_to/how-to-oracles.md) for a simple example on how to do that. diff --git a/docs/versioned_docs/version-v0.24.0/explainers/explainer-recursion.md b/docs/versioned_docs/version-v0.24.0/explainers/explainer-recursion.md new file mode 100644 index 00000000000..18846176ca7 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/explainers/explainer-recursion.md @@ -0,0 +1,176 @@ +--- +title: Recursive proofs +description: Explore the concept of recursive proofs in Zero-Knowledge programming. Understand how recursion works in Noir, a language for writing smart contracts on the EVM blockchain. Learn through practical examples like Alice and Bob's guessing game, Charlie's recursive merkle tree, and Daniel's reusable components. Discover how to use recursive proofs to optimize computational resources and improve efficiency. + +keywords: + [ + "Recursive Proofs", + "Zero-Knowledge Programming", + "Noir", + "EVM Blockchain", + "Smart Contracts", + "Recursion in Noir", + "Alice and Bob Guessing Game", + "Recursive Merkle Tree", + "Reusable Components", + "Optimizing Computational Resources", + "Improving Efficiency", + "Verification Key", + "Aggregation", + "Recursive zkSNARK schemes", + "PLONK", + "Proving and Verification Keys" + ] +sidebar_position: 1 +pagination_next: how_to/how-to-recursion +--- + +In programming, we tend to think of recursion as something calling itself. A classic example would be the calculation of the factorial of a number: + +```js +function factorial(n) { + if (n === 0 || n === 1) { + return 1; + } else { + return n * factorial(n - 1); + } +} +``` + +In this case, while `n` is not `1`, this function will keep calling itself until it hits the base case, bubbling up the result on the call stack: + +```md + Is `n` 1? 
<--------- + /\ / + / \ n = n -1 + / \ / + Yes No -------- +``` + +In Zero-Knowledge, recursion has some similarities. + +It is not a Noir function calling itself, but a proof being used as an input to another circuit. In short, you verify one proof *inside* another proof, returning the proof that both proofs are valid. + +This means that, given enough computational resources, you can prove the correctness of any arbitrary number of proofs in a single proof. This could be useful to design state channels (for which a common example would be [Bitcoin's Lightning Network](https://en.wikipedia.org/wiki/Lightning_Network)), to save on gas costs by settling one proof on-chain, or simply to make business logic less dependent on a consensus mechanism. + +## Examples + +Let us look at some of these examples + +### Alice and Bob - Guessing game + +Alice and Bob are friends, and they like guessing games. They want to play a guessing game online, but for that, they need a trusted third-party that knows both of their secrets and finishes the game once someone wins. + +So, they use zero-knowledge proofs. Alice tries to guess Bob's number, and Bob will generate a ZK proof stating whether she succeeded or failed. + +This ZK proof can go on a smart contract, revealing the winner and even giving prizes. However, this means every turn needs to be verified on-chain. This incurs some cost and waiting time that may simply make the game too expensive or time-consuming to be worth it. + +As a solution, Alice proposes the following: "what if Bob generates his proof, and instead of sending it on-chain, I verify it *within* my own proof before playing my own turn?". + +She can then generate a proof that she verified his proof, and so on. + +```md + Did you fail? <-------------------------- + / \ / + / \ n = n -1 + / \ / + Yes No / + | | / + | | / + | You win / + | / + | / +Generate proof of that / + + / + my own guess ---------------- +``` + +### Charlie - Recursive merkle tree + +Charlie is a concerned citizen, and wants to be sure his vote in an election is accounted for. He votes with a ZK proof, but he has no way of knowing that his ZK proof was included in the total vote count! + +If the vote collector puts all of the votes into a [Merkle tree](https://en.wikipedia.org/wiki/Merkle_tree), everyone can prove the verification of two proofs within one proof, as such: + +```md + abcd + __________|______________ + | | + ab cd + _____|_____ ______|______ + | | | | + alice bob charlie daniel +``` + +Doing this recursively allows us to arrive on a final proof `abcd` which if true, verifies the correctness of all the votes. + +### Daniel - Reusable components + +Daniel has a big circuit and a big headache. A part of his circuit is a setup phase that finishes with some assertions that need to be made. But that section alone takes most of the proving time, and is largely independent of the rest of the circuit. + +He might find it more efficient to generate a proof for that setup phase separately, and verify that proof recursively in the actual business logic section of his circuit. This will allow for parallelization of both proofs, which results in a considerable speedup. 
+ +## What params do I need + +As you can see in the [recursion reference](noir/standard_library/recursion.md), a simple recursive proof requires: + +- The proof to verify +- The Verification Key of the circuit that generated the proof +- A hash of this verification key, as it's needed for some backends +- The public inputs for the proof + +:::info + +Recursive zkSNARK schemes do not necessarily "verify a proof" in the sense that you expect a true or false to be spit out by the verifier. Rather an aggregation object is built over the public inputs. + +So, taking the example of Alice and Bob and their guessing game: + +- Alice makes her guess. Her proof is *not* recursive: it doesn't verify any proof within it! It's just a standard `assert(x != y)` circuit +- Bob verifies Alice's proof and makes his own guess. In this circuit, he doesn't exactly *prove* the verification of Alice's proof. Instead, he *aggregates* his proof to Alice's proof. The actual verification is done when the full proof is verified, for example when using `nargo verify` or through the verifier smart contract. + +We can imagine recursive proofs a [relay race](https://en.wikipedia.org/wiki/Relay_race). The first runner doesn't have to receive the baton from anyone else, as he/she already starts with it. But when his/her turn is over, the next runner needs to receive it, run a bit more, and pass it along. Even though every runner could theoretically verify the baton mid-run (why not? 🏃🔍), only at the end of the race does the referee verify that the whole race is valid. + +::: + +## Some architecture + +As with everything in computer science, there's no one-size-fits all. But there are some patterns that could help understanding and implementing them. To give three examples: + +### Adding some logic to a proof verification + +This would be an approach for something like our guessing game, where proofs are sent back and forth and are verified by each opponent. This circuit would be divided in two sections: + +- A `recursive verification` section, which would be just the call to `std::verify_proof`, and that would be skipped on the first move (since there's no proof to verify) +- A `guessing` section, which is basically the logic part where the actual guessing happens + +In such a situation, and assuming Alice is first, she would skip the first part and try to guess Bob's number. Bob would then verify her proof on the first section of his run, and try to guess Alice's number on the second part, and so on. + +### Aggregating proofs + +In some one-way interaction situations, recursion would allow for aggregation of simple proofs that don't need to be immediately verified on-chain or elsewhere. + +To give a practical example, a barman wouldn't need to verify a "proof-of-age" on-chain every time he serves alcohol to a customer. Instead, the architecture would comprise two circuits: + +- A `main`, non-recursive circuit with some logic +- A `recursive` circuit meant to verify two proofs in one proof + +The customer's proofs would be intermediate, and made on their phones, and the barman could just verify them locally. He would then aggregate them into a final proof sent on-chain (or elsewhere) at the end of the day. 
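+
+As a rough sketch of the first pattern above (a recursive verification section followed by a logic section), a single turn of the guessing game could look something like this. Treat it as a sketch only: the exact `std::verify_proof` signature, the array lengths, and the `as_slice` conversions depend on your Noir version and proving backend, and all names here are hypothetical.
+
+```rust
+use dep::std;
+
+fn main(
+    // Artifacts of the opponent's proof; the sizes are backend-specific placeholders.
+    verification_key: [Field; 114],
+    proof: [Field; 93],
+    public_input: Field,
+    key_hash: Field,
+    // Inputs for this turn's game logic.
+    my_guess: Field,
+    their_secret: Field
+) {
+    // Recursive verification section: aggregate the opponent's proof into this one.
+    // Depending on your Noir version, the arrays may or may not need `as_slice` conversion.
+    std::verify_proof(
+        verification_key.as_slice(),
+        proof.as_slice(),
+        public_input,
+        key_hash
+    );
+
+    // Guessing section: the actual game logic for this turn.
+    assert(my_guess != their_secret);
+}
+```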
+ +### Recursively verifying different circuits + +Nothing prevents you from verifying different circuits in a recursive proof, for example: + +- A `circuit1` circuit +- A `circuit2` circuit +- A `recursive` circuit + +In this example, a regulator could verify that taxes were paid for a specific purchase by aggregating both a `payer` circuit (proving that a purchase was made and taxes were paid), and a `receipt` circuit (proving that the payment was received) + +## How fast is it + +At the time of writing, verifying recursive proofs is surprisingly fast. This is because most of the time is spent on generating the verification key that will be used to generate the next proof. So you are able to cache the verification key and reuse it later. + +Currently, Noir JS packages don't expose the functionality of loading proving and verification keys, but that feature exists in the underlying `bb.js` package. + +## How can I try it + +Learn more about using recursion in Nargo and NoirJS in the [how-to guide](../how_to/how-to-recursion.md) and see a full example in [noir-examples](https://github.com/noir-lang/noir-examples). diff --git a/docs/versioned_docs/version-v0.24.0/getting_started/_category_.json b/docs/versioned_docs/version-v0.24.0/getting_started/_category_.json new file mode 100644 index 00000000000..5d694210bbf --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/getting_started/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.24.0/getting_started/hello_noir/_category_.json b/docs/versioned_docs/version-v0.24.0/getting_started/hello_noir/_category_.json new file mode 100644 index 00000000000..23b560f610b --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/getting_started/hello_noir/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.24.0/getting_started/hello_noir/index.md b/docs/versioned_docs/version-v0.24.0/getting_started/hello_noir/index.md new file mode 100644 index 00000000000..743c4d8d634 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/getting_started/hello_noir/index.md @@ -0,0 +1,142 @@ +--- +title: Creating a Project +description: + Learn how to create and verify your first Noir program using Nargo, a programming language for + zero-knowledge proofs. +keywords: + [ + Nargo, + Noir, + zero-knowledge proofs, + programming language, + create Noir program, + verify Noir program, + step-by-step guide, + ] +sidebar_position: 1 + +--- + +Now that we have installed Nargo, it is time to make our first hello world program! + +## Create a Project Directory + +Noir code can live anywhere on your computer. Let us create a _projects_ folder in the home +directory to house our Noir programs. + +For Linux, macOS, and Windows PowerShell, create the directory and change directory into it by +running: + +```sh +mkdir ~/projects +cd ~/projects +``` + +## Create Our First Nargo Project + +Now that we are in the projects directory, create a new Nargo project by running: + +```sh +nargo new hello_world +``` + +> **Note:** `hello_world` can be any arbitrary project name, we are simply using `hello_world` for +> demonstration. +> +> In production, the common practice is to name the project folder as `circuits` for better +> identifiability when sitting alongside other folders in the codebase (e.g. `contracts`, `scripts`, +> `test`). + +A `hello_world` folder would be created. 
Similar to Rust, the folder houses _src/main.nr_ and +_Nargo.toml_ which contain the source code and environmental options of your Noir program +respectively. + +### Intro to Noir Syntax + +Let us take a closer look at _main.nr_. The default _main.nr_ generated should look like this: + +```rust +fn main(x : Field, y : pub Field) { + assert(x != y); +} +``` + +The first line of the program specifies the program's inputs: + +```rust +x : Field, y : pub Field +``` + +Program inputs in Noir are private by default (e.g. `x`), but can be labeled public using the +keyword `pub` (e.g. `y`). To learn more about private and public values, check the +[Data Types](../../noir/concepts/data_types/index.md) section. + +The next line of the program specifies its body: + +```rust +assert(x != y); +``` + +The Noir syntax `assert` can be interpreted as something similar to constraints in other zk-contract languages. + +For more Noir syntax, check the [Language Concepts](../../noir/concepts/comments.md) chapter. + +## Build In/Output Files + +Change directory into _hello_world_ and build in/output files for your Noir program by running: + +```sh +cd hello_world +nargo check +``` + +Two additional files would be generated in your project directory: + +_Prover.toml_ houses input values, and _Verifier.toml_ houses public values. + +## Prove Our Noir Program + +Now that the project is set up, we can create a proof of correct execution of our Noir program. + +Fill in input values for execution in the _Prover.toml_ file. For example: + +```toml +x = "1" +y = "2" +``` + +Prove the valid execution of your Noir program: + +```sh +nargo prove +``` + +A new folder _proofs_ would then be generated in your project directory, containing the proof file +`.proof`, where the project name is defined in Nargo.toml. + +The _Verifier.toml_ file would also be updated with the public values computed from program +execution (in this case the value of `y`): + +```toml +y = "0x0000000000000000000000000000000000000000000000000000000000000002" +``` + +> **Note:** Values in _Verifier.toml_ are computed as 32-byte hex values. + +## Verify Our Noir Program + +Once a proof is generated, we can verify correct execution of our Noir program by verifying the +proof file. + +Verify your proof by running: + +```sh +nargo verify +``` + +The verification will complete in silence if it is successful. If it fails, it will log the +corresponding error instead. + +Congratulations, you have now created and verified a proof for your very first Noir program! + +In the [next section](./project_breakdown.md), we will go into more detail on each step performed. diff --git a/docs/versioned_docs/version-v0.24.0/getting_started/hello_noir/project_breakdown.md b/docs/versioned_docs/version-v0.24.0/getting_started/hello_noir/project_breakdown.md new file mode 100644 index 00000000000..6160a102c6c --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/getting_started/hello_noir/project_breakdown.md @@ -0,0 +1,199 @@ +--- +title: Project Breakdown +description: + Learn about the anatomy of a Nargo project, including the purpose of the Prover and Verifier TOML + files, and how to prove and verify your program. +keywords: + [Nargo, Nargo project, Prover.toml, Verifier.toml, proof verification, private asset transfer] +sidebar_position: 2 +--- + +This section breaks down our hello world program from the previous section. We elaborate on the project +structure and what the `prove` and `verify` commands did. 
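+
+For reference, the complete command flow from the previous section was, roughly:
+
+```sh
+nargo new hello_world    # create the project
+cd hello_world
+nargo check              # generate Prover.toml and Verifier.toml
+# fill in Prover.toml with input values, then:
+nargo prove              # writes ./proofs/hello_world.proof (named after the project)
+nargo verify             # verifies the proof, silent on success
+```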
+ +## Anatomy of a Nargo Project + +Upon creating a new project with `nargo new` and building the in/output files with `nargo check` +commands, you would get a minimal Nargo project of the following structure: + + - src + - Prover.toml + - Verifier.toml + - Nargo.toml + +The source directory _src_ holds the source code for your Noir program. By default only a _main.nr_ +file will be generated within it. + +### Prover.toml + +_Prover.toml_ is used for specifying the input values for executing and proving the program. You can specify `toml` files with different names by using the `--prover-name` or `-p` flags, see the [Prover](#provertoml) section below. Optionally you may specify expected output values for prove-time checking as well. + +### Verifier.toml + +_Verifier.toml_ contains public in/output values computed when executing the Noir program. + +### Nargo.toml + +_Nargo.toml_ contains the environmental options of your project. It contains a "package" section and a "dependencies" section. + +Example Nargo.toml: + +```toml +[package] +name = "noir_starter" +type = "bin" +authors = ["Alice"] +compiler_version = "0.9.0" +description = "Getting started with Noir" +entry = "circuit/main.nr" +license = "MIT" + +[dependencies] +ecrecover = {tag = "v0.9.0", git = "https://github.com/colinnielsen/ecrecover-noir.git"} +``` + +Nargo.toml for a [workspace](../../noir/modules_packages_crates/workspaces.md) will look a bit different. For example: + +```toml +[workspace] +members = ["crates/a", "crates/b"] +default-member = "crates/a" +``` + +#### Package section + +The package section defines a number of fields including: + +- `name` (**required**) - the name of the package +- `type` (**required**) - can be "bin", "lib", or "contract" to specify whether its a binary, library or Aztec contract +- `authors` (optional) - authors of the project +- `compiler_version` - specifies the version of the compiler to use. This is enforced by the compiler and follow's [Rust's versioning](https://doc.rust-lang.org/cargo/reference/manifest.html#the-version-field), so a `compiler_version = 0.18.0` will enforce Nargo version 0.18.0, `compiler_version = ^0.18.0` will enforce anything above 0.18.0 but below 0.19.0, etc. For more information, see how [Rust handles these operators](https://docs.rs/semver/latest/semver/enum.Op.html) +- `description` (optional) +- `entry` (optional) - a relative filepath to use as the entry point into your package (overrides the default of `src/lib.nr` or `src/main.nr`) +- `backend` (optional) +- `license` (optional) + +#### Dependencies section + +This is where you will specify any dependencies for your project. See the [Dependencies page](../../noir/modules_packages_crates/dependencies.md) for more info. + +`./proofs/` and `./contract/` directories will not be immediately visible until you create a proof or +verifier contract respectively. + +### main.nr + +The _main.nr_ file contains a `main` method, this method is the entry point into your Noir program. + +In our sample program, _main.nr_ looks like this: + +```rust +fn main(x : Field, y : Field) { + assert(x != y); +} +``` + +The parameters `x` and `y` can be seen as the API for the program and must be supplied by the +prover. Since neither `x` nor `y` is marked as public, the verifier does not supply any inputs, when +verifying the proof. + +The prover supplies the values for `x` and `y` in the _Prover.toml_ file. + +As for the program body, `assert` ensures that the condition to be satisfied (e.g. 
`x != y`) is +constrained by the proof of the execution of said program (i.e. if the condition was not met, the +verifier would reject the proof as an invalid proof). + +### Prover.toml + +The _Prover.toml_ file is a file which the prover uses to supply his witness values(both private and +public). + +In our hello world program the _Prover.toml_ file looks like this: + +```toml +x = "1" +y = "2" +``` + +When the command `nargo prove` is executed, two processes happen: + +1. Noir creates a proof that `x`, which holds the value of `1`, and `y`, which holds the value of `2`, + is not equal. This inequality constraint is due to the line `assert(x != y)`. + +2. Noir creates and stores the proof of this statement in the _proofs_ directory in a file called your-project.proof. So if your project is named "private_voting" (defined in the project Nargo.toml), the proof will be saved at `./proofs/private_voting.proof`. Opening this file will display the proof in hex format. + +#### Arrays of Structs + +The following code shows how to pass an array of structs to a Noir program to generate a proof. + +```rust +// main.nr +struct Foo { + bar: Field, + baz: Field, +} + +fn main(foos: [Foo; 3]) -> pub Field { + foos[2].bar + foos[2].baz +} +``` + +Prover.toml: + +```toml +[[foos]] # foos[0] +bar = 0 +baz = 0 + +[[foos]] # foos[1] +bar = 0 +baz = 0 + +[[foos]] # foos[2] +bar = 1 +baz = 2 +``` + +#### Custom toml files + +You can specify a `toml` file with a different name to use for proving by using the `--prover-name` or `-p` flags. + +This command looks for proof inputs in the default **Prover.toml** and generates the proof and saves it at `./proofs/.proof`: + +```bash +nargo prove +``` + +This command looks for proof inputs in the custom **OtherProver.toml** and generates proof and saves it at `./proofs/.proof`: + +```bash +nargo prove -p OtherProver +``` + +## Verifying a Proof + +When the command `nargo verify` is executed, two processes happen: + +1. Noir checks in the _proofs_ directory for a proof file with the project name (eg. test_project.proof) + +2. If that file is found, the proof's validity is checked + +> **Note:** The validity of the proof is linked to the current Noir program; if the program is +> changed and the verifier verifies the proof, it will fail because the proof is not valid for the +> _modified_ Noir program. + +In production, the prover and the verifier are usually two separate entities. A prover would +retrieve the necessary inputs, execute the Noir program, generate a proof and pass it to the +verifier. The verifier would then retrieve the public inputs, usually from external sources, and +verify the validity of the proof against it. + +Take a private asset transfer as an example: + +A person using a browser as the prover would retrieve private inputs locally (e.g. the user's private key) and +public inputs (e.g. the user's encrypted balance on-chain), compute the transfer, generate a proof +and submit it to the verifier smart contract. + +The verifier contract would then draw the user's encrypted balance directly from the blockchain and +verify the proof submitted against it. If the verification passes, additional functions in the +verifier contract could trigger (e.g. approve the asset transfer). + +Now that you understand the concepts, you'll probably want some editor feedback while you are writing more complex code. 
diff --git a/docs/versioned_docs/version-v0.24.0/getting_started/installation/_category_.json b/docs/versioned_docs/version-v0.24.0/getting_started/installation/_category_.json new file mode 100644 index 00000000000..0c02fb5d4d7 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/getting_started/installation/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 0, + "label": "Install Nargo", + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.24.0/getting_started/installation/index.md b/docs/versioned_docs/version-v0.24.0/getting_started/installation/index.md new file mode 100644 index 00000000000..4ef86aa5914 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/getting_started/installation/index.md @@ -0,0 +1,48 @@ +--- +title: Nargo Installation +description: + nargo is a command line tool for interacting with Noir programs. This page is a quick guide on how to install Nargo through the most common and easy method, noirup +keywords: [ + Nargo + Noir + Rust + Cargo + Noirup + Installation + Terminal Commands + Version Check + Nightlies + Specific Versions + Branches + Noirup Repository +] +pagination_next: getting_started/hello_noir/index +--- + +`nargo` is the one-stop-shop for almost everything related with Noir. The name comes from our love for Rust and its package manager `cargo`. + +With `nargo`, you can start new projects, compile, execute, prove, verify, test, generate solidity contracts, and do pretty much all that is available in Noir. + +Similarly to `rustup`, we also maintain an easy installation method that covers most machines: `noirup`. + +## Installing Noirup + +Open a terminal on your machine, and write: + +```bash +curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +``` + +Close the terminal, open another one, and run + +```bash +noirup +``` + +Done. That's it. You should have the latest version working. You can check with `nargo --version`. + +You can also install nightlies, specific versions +or branches. Check out the [noirup repository](https://github.com/noir-lang/noirup) for more +information. + +Now we're ready to start working on [our first Noir program!](../hello_noir/index.md) diff --git a/docs/versioned_docs/version-v0.24.0/getting_started/installation/other_install_methods.md b/docs/versioned_docs/version-v0.24.0/getting_started/installation/other_install_methods.md new file mode 100644 index 00000000000..076f26dfd94 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/getting_started/installation/other_install_methods.md @@ -0,0 +1,254 @@ +--- +title: Alternative Install Methods +description: There are different ways to install Nargo, the one-stop shop and command-line tool for developing Noir programs. This guide explains other methods that don't rely on noirup, such as compiling from source, installing from binaries, and using WSL for windows +keywords: [ + Installation + Nargo + Noirup + Binaries + Compiling from Source + WSL for Windows + macOS + Linux + Nix + Direnv + Shell & editor experience + Building and testing + Uninstalling Nargo + Noir vs code extension, + ] +sidebar_position: 1 +--- + +## Encouraged Installation Method: Noirup + +Noirup is the endorsed method for installing Nargo, streamlining the process of fetching binaries or compiling from source. It supports a range of options to cater to your specific needs, from nightly builds and specific versions to compiling from various sources. 
+ +### Installing Noirup + +First, ensure you have `noirup` installed: + +```sh +curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +``` + +### Fetching Binaries + +With `noirup`, you can easily switch between different Nargo versions, including nightly builds: + +- **Nightly Version**: Install the latest nightly build. + + ```sh + noirup --version nightly + ``` + +- **Specific Version**: Install a specific version of Nargo. + ```sh + noirup --version + ``` + +### Compiling from Source + +`noirup` also enables compiling Nargo from various sources: + +- **From a Specific Branch**: Install from the latest commit on a branch. + + ```sh + noirup --branch + ``` + +- **From a Fork**: Install from the main branch of a fork. + + ```sh + noirup --repo + ``` + +- **From a Specific Branch in a Fork**: Install from a specific branch in a fork. + + ```sh + noirup --repo --branch + ``` + +- **From a Specific Pull Request**: Install from a specific PR. + + ```sh + noirup --pr + ``` + +- **From a Specific Commit**: Install from a specific commit. + + ```sh + noirup -C + ``` + +- **From Local Source**: Compile and install from a local directory. + ```sh + noirup --path ./path/to/local/source + ``` + +## Alternate Installation Methods (No Longer Recommended) + +While the following methods are available, they are no longer recommended. We advise using noirup for a more efficient and flexible installation experience. + +However, there are other methods for installing Nargo: + +- [Binaries](#option-1-installing-from-binaries) +- [Compiling from Source](#option-2-compile-from-source) +- [WSL for Windows](#option-3-wsl-for-windows) + +### Option 1: Installing from Binaries + +See [GitHub Releases](https://github.com/noir-lang/noir/releases) for the latest and previous +platform specific binaries. + +#### Step 1 + +Paste and run the following in the terminal to extract and install the binary: + +> **macOS / Linux:** If you are prompted with `Permission denied` when running commands, prepend +> `sudo` and re-run it. + +##### macOS (Apple Silicon) + +```bash +mkdir -p $HOME/.nargo/bin && \ +curl -o $HOME/.nargo/bin/nargo-aarch64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.24.0/nargo-aarch64-apple-darwin.tar.gz && \ +tar -xvf $HOME/.nargo/bin/nargo-aarch64-apple-darwin.tar.gz -C $HOME/.nargo/bin/ && \ +echo '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.zshrc && \ +source ~/.zshrc +``` + +##### macOS (Intel) + +```bash +mkdir -p $HOME/.nargo/bin && \ +curl -o $HOME/.nargo/bin/nargo-x86_64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.24.0/nargo-x86_64-apple-darwin.tar.gz && \ +tar -xvf $HOME/.nargo/bin/nargo-x86_64-apple-darwin.tar.gz -C $HOME/.nargo/bin/ && \ +echo '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.zshrc && \ +source ~/.zshrc +``` + +##### Linux (Bash) + +```bash +mkdir -p $HOME/.nargo/bin && \ +curl -o $HOME/.nargo/bin/nargo-x86_64-unknown-linux-gnu.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.24.0/nargo-x86_64-unknown-linux-gnu.tar.gz && \ +tar -xvf $HOME/.nargo/bin/nargo-x86_64-unknown-linux-gnu.tar.gz -C $HOME/.nargo/bin/ && \ +echo -e '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.bashrc && \ +source ~/.bashrc +``` + +#### Step 2 + +Check if the installation was successful by running `nargo --version`. You should get a version number. + +> **macOS:** If you are prompted with an OS alert, right-click and open the _nargo_ executable from +> Finder. 
Close the new terminal popped up and `nargo` should now be accessible. + +### Option 2: Compile from Source + +Due to the large number of native dependencies, Noir projects uses [Nix](https://nixos.org/) and [direnv](https://direnv.net/) to streamline the development experience. It helps mitigating issues commonly associated with dependency management, such as conflicts between required package versions for different projects (often referred to as "dependency hell"). + +Combined with direnv, which automatically sets or clears environment variables based on the directory, it further simplifies the development process by seamlessly integrating with the developer's shell, facilitating an efficient and reliable workflow for managing and deploying Noir projects with multiple dependencies. + +#### Setting up your environment + +For the best experience, please follow these instructions to setup your environment: + +1. Install Nix following [their guide](https://nixos.org/download.html) for your operating system. +2. Create the file `~/.config/nix/nix.conf` with the contents: + +```ini +experimental-features = nix-command +extra-experimental-features = flakes +``` + +3. Install direnv into your Nix profile by running: + +```sh +nix profile install nixpkgs#direnv +``` + +4. Add direnv to your shell following [their guide](https://direnv.net/docs/hook.html). + 1. For bash or zshell, add `eval "$(direnv hook bash)"` or `eval "$(direnv hook zsh)"` to your ~/.bashrc or ~/.zshrc file, respectively. +5. Restart your shell. + +#### Shell & editor experience + +Now that your environment is set up, you can get to work on the project. + +1. Clone the repository, such as: + +```sh +git clone git@github.com:noir-lang/noir +``` + +> Replacing `noir` with whichever repository you want to work on. + +2. Navigate to the directory: + +```sh +cd noir +``` + +> Replacing `noir` with whichever repository you cloned. + +3. You should see a **direnv error** because projects aren't allowed by default. Make sure you've reviewed and trust our `.envrc` file, then you need to run: + +```sh +direnv allow +``` + +4. Now, wait awhile for all the native dependencies to be built. This will take some time and direnv will warn you that it is taking a long time, but we just need to let it run. + +5. Once you are presented with your prompt again, you can start your editor within the project directory (we recommend [VSCode](https://code.visualstudio.com/)): + +```sh +code . +``` + +6. (Recommended) When launching VSCode for the first time, you should be prompted to install our recommended plugins. We highly recommend installing these for the best development experience. + +#### Building and testing + +Assuming you are using `direnv` to populate your environment, building and testing the project can be done +with the typical `cargo build`, `cargo test`, and `cargo clippy` commands. You'll notice that the `cargo` version matches the version we specify in `rust-toolchain.toml`, which is 1.71.1 at the time of this writing. + +If you want to build the entire project in an isolated sandbox, you can use Nix commands: + +1. `nix build .` (or `nix build . -L` for verbose output) to build the project in a Nix sandbox. +2. `nix flake check` (or `nix flake check -L` for verbose output) to run clippy and tests in a Nix sandbox. + +#### Without `direnv` + +If you have hesitations with using direnv, you can launch a subshell with `nix develop` and then launch your editor from within the subshell. 
However, if VSCode was already launched in the project directory, the environment won't be updated. + +Advanced: If you aren't using direnv nor launching your editor within the subshell, you can try to install Barretenberg and other global dependencies the package needs. This is an advanced workflow and likely won't receive support! + +### Option 3: WSL (for Windows) + +The default backend for Noir (Barretenberg) doesn't provide Windows binaries at this time. For that reason, Noir cannot be installed natively. However, it is available by using Windows Subsystem for Linux (WSL). + +Step 1: Follow the instructions [here](https://learn.microsoft.com/en-us/windows/wsl/install) to install and run WSL. + +step 2: Follow the [Noirup instructions](#encouraged-installation-method-noirup). + +## Uninstalling Nargo + +### Noirup + +If you installed Nargo with `noirup` or through directly downloading binaries, you can uninstall Nargo by removing the files in `~/.nargo`, `~/nargo`, and `~/noir_cache`. This ensures that all installed binaries, configurations, and cache related to Nargo are fully removed from your system. + +```bash +rm -r ~/.nargo +rm -r ~/nargo +rm -r ~/noir_cache +``` + +### Nix + +If you installed Nargo with Nix or compiled it from source, you can remove the binary located at `~/.nix-profile/bin/nargo`. + +```bash +rm ~/.nix-profile/bin/nargo +``` diff --git a/docs/versioned_docs/version-v0.24.0/getting_started/tooling/_category_.json b/docs/versioned_docs/version-v0.24.0/getting_started/tooling/_category_.json new file mode 100644 index 00000000000..55804c03a71 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/getting_started/tooling/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 2, + "label": "Tooling", + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.24.0/getting_started/tooling/index.mdx b/docs/versioned_docs/version-v0.24.0/getting_started/tooling/index.mdx new file mode 100644 index 00000000000..ac480f3c9f5 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/getting_started/tooling/index.mdx @@ -0,0 +1,38 @@ +--- +title: Tooling +Description: This section provides information about the various tools and utilities available for Noir development. It covers the Noir playground, IDE tools, Codespaces, and community projects. +Keywords: [Noir, Development, Playground, IDE Tools, Language Service Provider, VS Code Extension, Codespaces, noir-starter, Community Projects, Awesome Noir Repository, Developer Tooling] +--- + +Noir is meant to be easy to develop with. For that reason, a number of utilities have been put together to ease the development process as much as feasible in the zero-knowledge world. + +## Playground + +The Noir playground is an easy way to test small ideas, share snippets, and integrate in other websites. You can access it at [play.noir-lang.org](https://play.noir-lang.org). + +## IDE tools + +When you install Nargo, you're also installing a Language Service Provider (LSP), which can be used by IDEs to provide syntax highlighting, codelens, warnings, and more. + +The easiest way to use these tools is by installing the [Noir VS Code extension](https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir). + +## Codespaces + +Some Noir repos have leveraged Codespaces in order to ease the development process. You can visit the [noir-starter](https://github.com/noir-lang/noir-starter) for an example. + + + +## GitHub Actions + +You can use `noirup` with GitHub Actions for CI/CD and automated testing. 
It is as simple as
+installing `noirup` and running tests in your GitHub Action `yml` file.
+
+See the
+[config file in the Noir repo](https://github.com/TomAFrench/noir-hashes/blob/master/.github/workflows/noir.yml) for an example usage.
+
+## Community projects
+
+As an open-source project, Noir has received many contributions over time. Some of them are related to developer tooling, and you can see some of them in the [Awesome Noir repository](https://github.com/noir-lang/awesome-noir#dev-tools).
diff --git a/docs/versioned_docs/version-v0.24.0/getting_started/tooling/language_server.md b/docs/versioned_docs/version-v0.24.0/getting_started/tooling/language_server.md
new file mode 100644
index 00000000000..81e0356ef8a
--- /dev/null
+++ b/docs/versioned_docs/version-v0.24.0/getting_started/tooling/language_server.md
@@ -0,0 +1,43 @@
+---
+title: Language Server
+description: Learn about the Noir Language Server, how to install the components, and configuration that may be required.
+keywords: [Nargo, Language Server, LSP, VSCode, Visual Studio Code]
+sidebar_position: 0
+---
+
+This section helps you install and configure the Noir Language Server.
+
+The Language Server Protocol (LSP) has two components, the [Server](#language-server) and the [Client](#language-client). Below we describe each in the context of Noir.
+
+## Language Server
+
+The Server component is provided by the Nargo command line tool that you installed at the beginning of this guide.
+As long as Nargo is installed and you've used it to run other commands in this guide, it should be good to go!
+
+If you'd like to verify that the `nargo lsp` command is available, you can run `nargo --help` and look for `lsp` in the list of commands. If you see it, you're using a version of Noir with LSP support.
+
+## Language Client
+
+The Client component is usually an editor plugin that launches the Server. It communicates LSP messages between the editor and the Server. For example, when you save a file, the Client will alert the Server, so it can try to compile the project and report any errors.
+
+Currently, Noir provides a Language Client for Visual Studio Code via the [vscode-noir](https://github.com/noir-lang/vscode-noir) extension. You can install it via the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir).
+
+> **Note:** Noir's Language Server Protocol support currently assumes users' VSCode workspace root to be the same as users' Noir project root (i.e. where Nargo.toml lies).
+>
+> If LSP features seem to be missing / malfunctioning, make sure you are opening your Noir project directly (instead of as a sub-folder) in your VSCode instance.
+
+When your language server is running correctly and the VSCode plugin is installed, you should see handy codelens buttons for compilation, measuring circuit size, execution, and tests:
+
+![Compile and Execute](@site/static/img/codelens_compile_execute.png)
+![Run test](@site/static/img/codelens_run_test.png)
+
+You should also see your tests in the `testing` panel:
+
+![Testing panel](@site/static/img/codelens_testing_panel.png)
+
+### Configuration
+
+- **Noir: Enable LSP** - If checked, the extension will launch the Language Server via `nargo lsp` and communicate with it.
+- **Noir: Nargo Flags** - Additional flags may be specified if you require them to be added when the extension calls `nargo lsp`.
+- **Noir: Nargo Path** - An absolute path to a Nargo binary with the `lsp` command.
This may be useful if Nargo is not within the `PATH` of your editor. +- **Noir > Trace: Server** - Setting this to `"messages"` or `"verbose"` will log LSP messages between the Client and Server. Useful for debugging. diff --git a/docs/versioned_docs/version-v0.24.0/getting_started/tooling/testing.md b/docs/versioned_docs/version-v0.24.0/getting_started/tooling/testing.md new file mode 100644 index 00000000000..d3e0c522473 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/getting_started/tooling/testing.md @@ -0,0 +1,62 @@ +--- +title: Testing in Noir +description: Learn how to use Nargo to test your Noir program in a quick and easy way +keywords: [Nargo, testing, Noir, compile, test] +sidebar_position: 1 +--- + +You can test your Noir programs using Noir circuits. + +Nargo will automatically compile and run any functions which have the decorator `#[test]` on them if +you run `nargo test`. + +For example if you have a program like: + +```rust +fn add(x: u64, y: u64) -> u64 { + x + y +} +#[test] +fn test_add() { + assert(add(2,2) == 4); + assert(add(0,1) == 1); + assert(add(1,0) == 1); +} +``` + +Running `nargo test` will test that the `test_add` function can be executed while satisfying all +the constraints which allows you to test that add returns the expected values. Test functions can't +have any arguments currently. + +### Test fail + +You can write tests that are expected to fail by using the decorator `#[test(should_fail)]`. For example: + +```rust +fn add(x: u64, y: u64) -> u64 { + x + y +} +#[test(should_fail)] +fn test_add() { + assert(add(2,2) == 5); +} +``` + +You can be more specific and make it fail with a specific reason by using `should_fail_with = "`: + +```rust +fn main(african_swallow_avg_speed : Field) { + assert(african_swallow_avg_speed == 65, "What is the airspeed velocity of an unladen swallow"); +} + +#[test] +fn test_king_arthur() { + main(65); +} + +#[test(should_fail_with = "What is the airspeed velocity of an unladen swallow")] +fn test_bridgekeeper() { + main(32); +} + +``` diff --git a/docs/versioned_docs/version-v0.24.0/how_to/_category_.json b/docs/versioned_docs/version-v0.24.0/how_to/_category_.json new file mode 100644 index 00000000000..23b560f610b --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/how_to/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.24.0/how_to/how-to-oracles.md b/docs/versioned_docs/version-v0.24.0/how_to/how-to-oracles.md new file mode 100644 index 00000000000..0d84d992320 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/how_to/how-to-oracles.md @@ -0,0 +1,280 @@ +--- +title: How to use Oracles +description: Learn how to use oracles in your Noir program with examples in both Nargo and NoirJS. This guide also covers writing a JSON RPC server and providing custom foreign call handlers for NoirJS. +keywords: + - Noir Programming + - Oracles + - Nargo + - NoirJS + - JSON RPC Server + - Foreign Call Handlers +sidebar_position: 1 +--- + +This guide shows you how to use oracles in your Noir program. For the sake of clarity, it assumes that: + +- You have read the [explainer on Oracles](../explainers/explainer-oracle.md) and are comfortable with the concept. +- You have a Noir program to add oracles to. You can create one using the [vite-hardhat starter](https://github.com/noir-lang/noir-starter/tree/main/vite-hardhat) as a boilerplate. +- You understand the concept of a JSON-RPC server. 
Visit the [JSON-RPC website](https://www.jsonrpc.org/) if you need a refresher. +- You are comfortable with server-side JavaScript (e.g. Node.js, managing packages, etc.). + +For reference, you can find the snippets used in this tutorial on the [Aztec DevRel Repository](https://github.com/AztecProtocol/dev-rel/tree/main/code-snippets/how-to-oracles). + +## Rundown + +This guide has 3 major steps: + +1. How to modify our Noir program to make use of oracle calls as unconstrained functions +2. How to write a JSON RPC Server to resolve these oracle calls with Nargo +3. How to use them in Nargo and how to provide a custom resolver in NoirJS + +## Step 1 - Modify your Noir program + +An oracle is defined in a Noir program by defining two methods: + +- An unconstrained method - This tells the compiler that it is executing an [unconstrained functions](../noir/concepts//unconstrained.md). +- A decorated oracle method - This tells the compiler that this method is an RPC call. + +An example of an oracle that returns a `Field` would be: + +```rust +#[oracle(getSqrt)] +unconstrained fn sqrt(number: Field) -> Field { } + +unconstrained fn get_sqrt(number: Field) -> Field { + sqrt(number) +} +``` + +In this example, we're wrapping our oracle function in a unconstrained method, and decorating it with `oracle(getSqrt)`. We can then call the unconstrained function as we would call any other function: + +```rust +fn main(input: Field) { + let sqrt = get_sqrt(input); +} +``` + +In the next section, we will make this `getSqrt` (defined on the `sqrt` decorator) be a method of the RPC server Noir will use. + +:::danger + +As explained in the [Oracle Explainer](../explainers/explainer-oracle.md), this `main` function is unsafe unless you constrain its return value. For example: + +```rust +fn main(input: Field) { + let sqrt = get_sqrt(input); + assert(sqrt.pow_32(2) as u64 == input as u64); // <---- constrain the return of an oracle! +} +``` + +::: + +:::info + +Currently, oracles only work with single params or array params. For example: + +```rust +#[oracle(getSqrt)] +unconstrained fn sqrt([Field; 2]) -> [Field; 2] { } +``` + +::: + +## Step 2 - Write an RPC server + +Brillig will call *one* RPC server. Most likely you will have to write your own, and you can do it in whatever language you prefer. In this guide, we will do it in Javascript. + +Let's use the above example of an oracle that consumes an array with two `Field` and returns their square roots: + +```rust +#[oracle(getSqrt)] +unconstrained fn sqrt(input: [Field; 2]) -> [Field; 2] { } + +unconstrained fn get_sqrt(input: [Field; 2]) -> [Field; 2] { + sqrt(input) +} + +fn main(input: [Field; 2]) { + let sqrt = get_sqrt(input); + assert(sqrt[0].pow_32(2) as u64 == input[0] as u64); + assert(sqrt[1].pow_32(2) as u64 == input[1] as u64); +} +``` + +:::info + +Why square root? + +In general, computing square roots is computationally more expensive than multiplications, which takes a toll when speaking about ZK applications. In this case, instead of calculating the square root in Noir, we are using our oracle to offload that computation to be made in plain. In our circuit we can simply multiply the two values. 
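+
+For instance, sticking with the example above, the same constraint could be written as a plain multiplication rather than `pow_32` (a sketch, assuming the inputs are perfect squares and the oracle returns exact roots):
+
+```rust
+fn main(input: [Field; 2]) {
+    let sqrt = get_sqrt(input);
+    // Constrain the oracle's output by squaring it with a single multiplication each
+    assert(sqrt[0] * sqrt[0] == input[0]);
+    assert(sqrt[1] * sqrt[1] == input[1]);
+}
+```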
+ +::: + +Now, we should write the correspondent RPC server, starting with the [default JSON-RPC 2.0 boilerplate](https://www.npmjs.com/package/json-rpc-2.0#example): + +```js +import { JSONRPCServer } from "json-rpc-2.0"; +import express from "express"; +import bodyParser from "body-parser"; + +const app = express(); +app.use(bodyParser.json()); + +const server = new JSONRPCServer(); +app.post("/", (req, res) => { + const jsonRPCRequest = req.body; + server.receive(jsonRPCRequest).then((jsonRPCResponse) => { + if (jsonRPCResponse) { + res.json(jsonRPCResponse); + } else { + res.sendStatus(204); + } + }); +}); + +app.listen(5555); +``` + +Now, we will add our `getSqrt` method, as expected by the `#[oracle(getSqrt)]` decorator in our Noir code. It maps through the params array and returns their square roots: + +```js +server.addMethod("getSqrt", async (params) => { + const values = params[0].Array.map(({ inner }) => { + return { inner: `${Math.sqrt(parseInt(inner, 16))}` }; + }); + return { values: [{ Array: values }] }; +}); +``` + +:::tip + +Brillig expects an object with an array of values. Each value is an object declaring to be `Single` or `Array` and returning a `inner` property *as a string*. For example: + +```json +{ "values": [{ "Array": [{ "inner": "1" }, { "inner": "2"}]}]} +{ "values": [{ "Single": { "inner": "1" }}]} +{ "values": [{ "Single": { "inner": "1" }}, { "Array": [{ "inner": "1", { "inner": "2" }}]}]} +``` + +If you're using Typescript, the following types may be helpful in understanding the expected return value and making sure they're easy to follow: + +```js +interface Value { + inner: string, +} + +interface SingleForeignCallParam { + Single: Value, +} + +interface ArrayForeignCallParam { + Array: Value[], +} + +type ForeignCallParam = SingleForeignCallParam | ArrayForeignCallParam; + +interface ForeignCallResult { + values: ForeignCallParam[], +} +``` + +::: + +## Step 3 - Usage with Nargo + +Using the [`nargo` CLI tool](../getting_started/installation/index.md), you can use oracles in the `nargo test`, `nargo execute` and `nargo prove` commands by passing a value to `--oracle-resolver`. For example: + +```bash +nargo test --oracle-resolver http://localhost:5555 +``` + +This tells `nargo` to use your RPC Server URL whenever it finds an oracle decorator. + +## Step 4 - Usage with NoirJS + +In a JS environment, an RPC server is not strictly necessary, as you may want to resolve your oracles without needing any JSON call at all. NoirJS simply expects that you pass a callback function when you generate proofs, and that callback function can be anything. + +For example, if your Noir program expects the host machine to provide CPU pseudo-randomness, you could simply pass it as the `foreignCallHandler`. You don't strictly need to create an RPC server to serve pseudo-randomness, as you may as well get it directly in your app: + +```js +const foreignCallHandler = (name, inputs) => crypto.randomBytes(16) // etc + +await noir.generateFinalProof(inputs, foreignCallHandler) +``` + +As one can see, in NoirJS, the [`foreignCallHandler`](../reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md) function simply means "a callback function that returns a value of type [`ForeignCallOutput`](../reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md). It doesn't have to be an RPC call like in the case for Nargo. + +:::tip + +Does this mean you don't have to write an RPC server like in [Step #2](#step-2---write-an-rpc-server)? 
+ +You don't technically have to, but then how would you run `nargo test` or `nargo prove`? To use both `Nargo` and `NoirJS` in your development flow, you will have to write a JSON RPC server. + +::: + +In this case, let's make `foreignCallHandler` call the JSON RPC Server we created in [Step #2](#step-2---write-an-rpc-server), by making it a JSON RPC Client. + +For example, using the same `getSqrt` program in [Step #1](#step-1---modify-your-noir-program) (comments in the code): + +```js +import { JSONRPCClient } from "json-rpc-2.0"; + +// declaring the JSONRPCClient +const client = new JSONRPCClient((jsonRPCRequest) => { +// hitting the same JSON RPC Server we coded above + return fetch("http://localhost:5555", { + method: "POST", + headers: { + "content-type": "application/json", + }, + body: JSON.stringify(jsonRPCRequest), + }).then((response) => { + if (response.status === 200) { + return response + .json() + .then((jsonRPCResponse) => client.receive(jsonRPCResponse)); + } else if (jsonRPCRequest.id !== undefined) { + return Promise.reject(new Error(response.statusText)); + } + }); +}); + +// declaring a function that takes the name of the foreign call (getSqrt) and the inputs +const foreignCallHandler = async (name, input) => { + // notice that the "inputs" parameter contains *all* the inputs + // in this case we to make the RPC request with the first parameter "numbers", which would be input[0] + const oracleReturn = await client.request(name, [ + { Array: input[0].map((i) => ({ inner: i.toString("hex") })) }, + ]); + return [oracleReturn.values[0].Array.map((x) => x.inner)]; +}; + +// the rest of your NoirJS code +const input = { input: [4, 16] }; +const { witness } = await noir.execute(numbers, foreignCallHandler); +``` + +:::tip + +If you're in a NoirJS environment running your RPC server together with a frontend app, you'll probably hit a familiar problem in full-stack development: requests being blocked by [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) policy. For development only, you can simply install and use the [`cors` npm package](https://www.npmjs.com/package/cors) to get around the problem: + +```bash +yarn add cors +``` + +and use it as a middleware: + +```js +import cors from "cors"; + +const app = express(); +app.use(cors()) +``` + +::: + +## Conclusion + +Hopefully by the end of this guide, you should be able to: + +- Write your own logic around Oracles and how to write a JSON RPC server to make them work with your Nargo commands. +- Provide custom foreign call handlers for NoirJS. diff --git a/docs/versioned_docs/version-v0.24.0/how_to/how-to-recursion.md b/docs/versioned_docs/version-v0.24.0/how_to/how-to-recursion.md new file mode 100644 index 00000000000..4c45bb87ae2 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/how_to/how-to-recursion.md @@ -0,0 +1,179 @@ +--- +title: How to use recursion on NoirJS +description: Learn how to implement recursion with NoirJS, a powerful tool for creating smart contracts on the EVM blockchain. This guide assumes familiarity with NoirJS, solidity verifiers, and the Barretenberg proving backend. Discover how to generate both final and intermediate proofs using `noir_js` and `backend_barretenberg`. 
+keywords: + [ + "NoirJS", + "EVM blockchain", + "smart contracts", + "recursion", + "solidity verifiers", + "Barretenberg backend", + "noir_js", + "backend_barretenberg", + "intermediate proofs", + "final proofs", + "nargo compile", + "json import", + "recursive circuit", + "recursive app" + ] +sidebar_position: 1 +--- + +This guide shows you how to use recursive proofs in your NoirJS app. For the sake of clarity, it is assumed that: + +- You already have a NoirJS app. If you don't, please visit the [NoirJS tutorial](../tutorials/noirjs_app.md) and the [reference](../reference/NoirJS/noir_js/index.md). +- You are familiar with what are recursive proofs and you have read the [recursion explainer](../explainers/explainer-recursion.md) +- You already built a recursive circuit following [the reference](../noir/standard_library/recursion.md), and understand how it works. + +It is also assumed that you're not using `noir_wasm` for compilation, and instead you've used [`nargo compile`](../reference/nargo_commands.md) to generate the `json` you're now importing into your project. However, the guide should work just the same if you're using `noir_wasm`. + +:::info + +As you've read in the [explainer](../explainers/explainer-recursion.md), a recursive proof is an intermediate proof. This means that it doesn't necessarily generate the final step that makes it verifiable in a smart contract. However, it is easy to verify within another circuit. + +While "standard" usage of NoirJS packages abstracts final proofs, it currently lacks the necessary interface to abstract away intermediate proofs. This means that these proofs need to be created by using the backend directly. + +In short: + +- `noir_js` generates *only* final proofs +- `backend_barretenberg` generates both types of proofs + +::: + +In a standard recursive app, you're also dealing with at least two circuits. For the purpose of this guide, we will assume the following: + +- `main`: a circuit of type `assert(x != y)`, where `main` is marked with a `#[recursive]` attribute. This attribute states that the backend should generate proofs that are friendly for verification within another circuit. +- `recursive`: a circuit that verifies `main` + +For a full example on how recursive proofs work, please refer to the [noir-examples](https://github.com/noir-lang/noir-examples) repository. We will *not* be using it as a reference for this guide. + +## Step 1: Setup + +In a common NoirJS app, you need to instantiate a backend with something like `const backend = new Backend(circuit)`. Then you feed it to the `noir_js` interface. + +For recursion, this doesn't happen, and the only need for `noir_js` is only to `execute` a circuit and get its witness and return value. Everything else is not interfaced, so it needs to happen on the `backend` object. + +It is also recommended that you instantiate the backend with as many threads as possible, to allow for maximum concurrency: + +```js +const backend = new Backend(circuit, { threads: 8 }) +``` + +:::tip +You can use the [`os.cpus()`](https://nodejs.org/api/os.html#oscpus) object in `nodejs` or [`navigator.hardwareConcurrency`](https://developer.mozilla.org/en-US/docs/Web/API/Navigator/hardwareConcurrency) on the browser to make the most out of those glorious cpu cores +::: + +## Step 2: Generating the witness and the proof for `main` + +After instantiating the backend, you should also instantiate `noir_js`. We will use it to execute the circuit and get the witness. 
+ +```js +const noir = new Noir(circuit, backend) +const { witness } = noir.execute(input) +``` + +With this witness, you are now able to generate the intermediate proof for the main circuit: + +```js +const { proof, publicInputs } = await backend.generateProof(witness) +``` + +:::warning + +Always keep in mind what is actually happening on your development process, otherwise you'll quickly become confused about what circuit we are actually running and why! + +In this case, you can imagine that Alice (running the `main` circuit) is proving something to Bob (running the `recursive` circuit), and Bob is verifying her proof within his proof. + +With this in mind, it becomes clear that our intermediate proof is the one *meant to be verified within another circuit*, so it must be Alice's. Actually, the only final proof in this theoretical scenario would be the last one, sent on-chain. + +::: + +## Step 3 - Verification and proof artifacts + +Optionally, you are able to verify the intermediate proof: + +```js +const verified = await backend.verifyProof({ proof, publicInputs }) +``` + +This can be useful to make sure our intermediate proof was correctly generated. But the real goal is to do it within another circuit. For that, we need to generate recursive proof artifacts that will be passed to the circuit that is verifying the proof we just generated. Instead of passing the proof and verification key as a byte array, we pass them as fields which makes it cheaper to verify in a circuit: + +```js +const { proofAsFields, vkAsFields, vkHash } = await backend.generateRecursiveProofArtifacts( { publicInputs, proof }, publicInputsCount) +``` + +This call takes the public inputs and the proof, but also the public inputs count. While this is easily retrievable by simply counting the `publicInputs` length, the backend interface doesn't currently abstract it away. + +:::info + +The `proofAsFields` has a constant size `[Field; 93]` and verification keys in Barretenberg are always `[Field; 114]`. + +::: + +:::warning + +One common mistake is to forget *who* makes this call. + +In a situation where Alice is generating the `main` proof, if she generates the proof artifacts and sends them to Bob, which gladly takes them as true, this would mean Alice could prove anything! + +Instead, Bob needs to make sure *he* extracts the proof artifacts, using his own instance of the `main` circuit backend. This way, Alice has to provide a valid proof for the correct `main` circuit. + +::: + +## Step 4 - Recursive proof generation + +With the artifacts, generating a recursive proof is no different from a normal proof. You simply use the `backend` (with the recursive circuit) to generate it: + +```js +const recursiveInputs = { + verification_key: vkAsFields, // array of length 114 + proof: proofAsFields, // array of length 93 + size of public inputs + publicInputs: [mainInput.y], // using the example above, where `y` is the only public input + key_hash: vkHash, +} + +const { witness, returnValue } = noir.execute(recursiveInputs) // we're executing the recursive circuit now! +const { proof, publicInputs } = backend.generateProof(witness) +const verified = backend.verifyProof({ proof, publicInputs }) +``` + +You can obviously chain this proof into another proof. In fact, if you're using recursive proofs, you're probably interested of using them this way! + +:::tip + +Managing circuits and "who does what" can be confusing. To make sure your naming is consistent, you can keep them in an object. 
For example: + +```js +const circuits = { + main: mainJSON, + recursive: recursiveJSON +} +const backends = { + main: new BarretenbergBackend(circuits.main), + recursive: new BarretenbergBackend(circuits.recursive) +} +const noir_programs = { + main: new Noir(circuits.main, backends.main), + recursive: new Noir(circuits.recursive, backends.recursive) +} +``` + +This allows you to neatly call exactly the method you want without conflicting names: + +```js +// Alice runs this 👇 +const { witness: mainWitness } = await noir_programs.main.execute(input) +const proof = await backends.main.generateProof(mainWitness) + +// Bob runs this 👇 +const verified = await backends.main.verifyProof(proof) +const { proofAsFields, vkAsFields, vkHash } = await backends.main.generateRecursiveProofArtifacts( + proof, + numPublicInputs, +); +const recursiveProof = await noir_programs.recursive.generateProof(recursiveInputs) +``` + +::: diff --git a/docs/versioned_docs/version-v0.24.0/how_to/how-to-solidity-verifier.md b/docs/versioned_docs/version-v0.24.0/how_to/how-to-solidity-verifier.md new file mode 100644 index 00000000000..e3c7c1065da --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/how_to/how-to-solidity-verifier.md @@ -0,0 +1,231 @@ +--- +title: Generate a Solidity Verifier +description: + Learn how to run the verifier as a smart contract on the blockchain. Compile a Solidity verifier + contract for your Noir program and deploy it on any EVM blockchain acting as a verifier smart + contract. Read more to find out +keywords: + [ + solidity verifier, + smart contract, + blockchain, + compiler, + plonk_vk.sol, + EVM blockchain, + verifying Noir programs, + proving backend, + Barretenberg, + ] +sidebar_position: 0 +pagination_next: tutorials/noirjs_app +--- + +Noir has the ability to generate a verifier contract in Solidity, which can be deployed in many EVM-compatible blockchains such as Ethereum. + +This allows for a powerful feature set, as one can make use of the conciseness and the privacy provided by Noir in an immutable ledger. Applications can range from simple P2P guessing games, to complex private DeFi interactions. + +This guide shows you how to generate a Solidity Verifier and deploy it on the [Remix IDE](https://remix.ethereum.org/). It is assumed that: + +- You are comfortable with the Solidity programming language and understand how contracts are deployed on the Ethereum network +- You have Noir installed and you have a Noir program. If you don't, [get started](../getting_started/installation/index.md) with Nargo and the example Hello Noir circuit +- You are comfortable navigating RemixIDE. If you aren't or you need a refresher, you can find some video tutorials [here](https://www.youtube.com/channel/UCjTUPyFEr2xDGN6Cg8nKDaA) that could help you. + +## Rundown + +Generating a Solidity Verifier contract is actually a one-command process. However, compiling it and deploying it can have some caveats. Here's the rundown of this guide: + +1. How to generate a solidity smart contract +2. How to compile the smart contract in the RemixIDE +3. How to deploy it to a testnet + +## Step 1 - Generate a contract + +This is by far the most straight-forward step. Just run: + +```sh +nargo codegen-verifier +``` + +A new `contract` folder would then be generated in your project directory, containing the Solidity +file `plonk_vk.sol`. It can be deployed to any EVM blockchain acting as a verifier smart contract. 
+ +:::info + +It is possible to generate verifier contracts of Noir programs for other smart contract platforms as long as the proving backend supplies an implementation. + +Barretenberg, the default proving backend for Nargo, supports generation of verifier contracts, for the time being these are only in Solidity. +::: + +## Step 2 - Compiling + +We will mostly skip the details of RemixIDE, as the UI can change from version to version. For now, we can just open +Remix and create a blank workspace. + +![Create Workspace](@site/static/img/how-tos/solidity_verifier_1.png) + +We will create a new file to contain the contract Nargo generated, and copy-paste its content. + +:::warning + +You'll likely see a warning advising you to not trust pasted code. While it is an important warning, it is irrelevant in the context of this guide and can be ignored. We will not be deploying anywhere near a mainnet. + +::: + +To compile our the verifier, we can navigate to the compilation tab: + +![Compilation Tab](@site/static/img/how-tos/solidity_verifier_2.png) + +Remix should automatically match a suitable compiler version. However, hitting the "Compile" button will most likely generate a "Stack too deep" error: + +![Stack too deep](@site/static/img/how-tos/solidity_verifier_3.png) + +This is due to the verify function needing to put many variables on the stack, but enabling the optimizer resolves the issue. To do this, let's open the "Advanced Configurations" tab and enable optimization. The default 200 runs will suffice. + +:::info + +This time we will see a warning about an unused function parameter. This is expected, as the `verify` function doesn't use the `_proof` parameter inside a solidity block, it is loaded from calldata and used in assembly. + +::: + +![Compilation success](@site/static/img/how-tos/solidity_verifier_4.png) + +## Step 3 - Deploying + +At this point we should have a compiled contract read to deploy. If we navigate to the deploy section in Remix, we will see many different environments we can deploy to. The steps to deploy on each environment would be out-of-scope for this guide, so we will just use the default Remix VM. + +Looking closely, we will notice that our "Solidity Verifier" is actually three contracts working together: + +- An `UltraVerificationKey` library which simply stores the verification key for our circuit. +- An abstract contract `BaseUltraVerifier` containing most of the verifying logic. +- A main `UltraVerifier` contract that inherits from the Base and uses the Key contract. + +Remix will take care of the dependencies for us so we can simply deploy the UltraVerifier contract by selecting it and hitting "deploy": + +![Deploying UltraVerifier](@site/static/img/how-tos/solidity_verifier_5.png) + +A contract will show up in the "Deployed Contracts" section, where we can retrieve the Verification Key Hash. This is particularly useful for double-checking the deployer contract is the correct one. + +:::note + +Why "UltraVerifier"? + +To be precise, the Noir compiler (`nargo`) doesn't generate the verifier contract directly. It compiles the Noir code into an intermediate language (ACIR), which is then executed by the backend. So it is the backend that returns the verifier smart contract, not Noir. + +In this case, the Barretenberg Backend uses the UltraPlonk proving system, hence the "UltraVerifier" name. 
+ +::: + +## Step 4 - Verifying + +To verify a proof using the Solidity verifier contract, we call the `verify` function in this extended contract: + +```solidity +function verify(bytes calldata _proof, bytes32[] calldata _publicInputs) external view returns (bool) +``` + +When using the default example in the [Hello Noir](../getting_started/hello_noir/index.md) guide, the easiest way to confirm that the verifier contract is doing its job is by calling the `verify` function via remix with the required parameters. For `_proof`, run `nargo prove` and use the string in `proof/.proof` (adding the hex `0x` prefix). We can also copy the public input from `Verifier.toml`, as it will be properly formatted as 32-byte strings: + +``` +0x...... , [0x0000.....02] +``` + +A programmatic example of how the `verify` function is called can be seen in the example zk voting application [here](https://github.com/noir-lang/noir-examples/blob/33e598c257e2402ea3a6b68dd4c5ad492bce1b0a/foundry-voting/src/zkVote.sol#L35): + +```solidity +function castVote(bytes calldata proof, uint proposalId, uint vote, bytes32 nullifierHash) public returns (bool) { + // ... + bytes32[] memory publicInputs = new bytes32[](4); + publicInputs[0] = merkleRoot; + publicInputs[1] = bytes32(proposalId); + publicInputs[2] = bytes32(vote); + publicInputs[3] = nullifierHash; + require(verifier.verify(proof, publicInputs), "Invalid proof"); +``` + +:::info[Return Values] + +A circuit doesn't have the concept of a return value. Return values are just syntactic sugar in +Noir. + +Under the hood, the return value is passed as an input to the circuit and is checked at the end of +the circuit program. + +For example, if you have Noir program like this: + +```rust +fn main( + // Public inputs + pubkey_x: pub Field, + pubkey_y: pub Field, + // Private inputs + priv_key: Field, +) -> pub Field +``` + +the `verify` function will expect the public inputs array (second function parameter) to be of length 3, the two inputs and the return value. Like before, these values are populated in Verifier.toml after running `nargo prove`. + +Passing only two inputs will result in an error such as `PUBLIC_INPUT_COUNT_INVALID(3, 2)`. + +In this case, the inputs parameter to `verify` would be an array ordered as `[pubkey_x, pubkey_y, return]`. + +::: + +:::tip[Structs] + +You can pass structs to the verifier contract. They will be flattened so that the array of inputs is 1-dimensional array. + +For example, consider the following program: + +```rust +struct Type1 { + val1: Field, + val2: Field, +} + +struct Nested { + t1: Type1, + is_true: bool, +} + +fn main(x: pub Field, nested: pub Nested, y: pub Field) { + //... +} +``` + +The order of these inputs would be flattened to: `[x, nested.t1.val1, nested.t1.val2, nested.is_true, y]` + +::: + +The other function you can call is our entrypoint `verify` function, as defined above. + +:::tip + +It's worth noticing that the `verify` function is actually a `view` function. A `view` function does not alter the blockchain state, so it doesn't need to be distributed (i.e. it will run only on the executing node), and therefore doesn't cost any gas. + +This can be particularly useful in some situations. If Alice generated a proof and wants Bob to verify its correctness, Bob doesn't need to run Nargo, NoirJS, or any Noir specific infrastructure. He can simply make a call to the blockchain with the proof and verify it is correct without paying any gas. 
+ +It would be incorrect to say that a Noir proof verification costs any gas at all. However, most of the time the result of `verify` is used to modify state (for example, to update a balance, a game state, etc). In that case the whole network needs to execute it, which does incur gas costs (calldata and execution, but not storage). + +::: + +## A Note on EVM chains + +ZK-SNARK verification depends on some precompiled cryptographic primitives such as Elliptic Curve Pairings (if you like complex math, you can read about EC Pairings [here](https://medium.com/@VitalikButerin/exploring-elliptic-curve-pairings-c73c1864e627)). Not all EVM chains support EC Pairings, notably some of the ZK-EVMs. This means that you won't be able to use the verifier contract in all of them. + +For example, chains like `zkSync ERA` and `Polygon zkEVM` do not currently support these precompiles, so proof verification via Solidity verifier contracts won't work. Here's a quick list of EVM chains that have been tested and are known to work: + +- Optimism +- Arbitrum +- Polygon PoS +- Scroll +- Celo + +If you test any other chains, please open a PR on this page to update the list. See [this doc](https://github.com/noir-lang/noir-starter/tree/main/with-foundry#testing-on-chain) for more info about testing verifier contracts on different EVM chains. + +## What's next + +Now that you know how to call a Noir Solidity Verifier on a smart contract using Remix, you should be comfortable with using it with some programmatic frameworks, such as [hardhat](https://github.com/noir-lang/noir-starter/tree/main/vite-hardhat) and [foundry](https://github.com/noir-lang/noir-starter/tree/main/with-foundry). + +You can find other tools, examples, boilerplates and libraries in the [awesome-noir](https://github.com/noir-lang/awesome-noir) repository. + +You should also be ready to write and deploy your first NoirJS app and start generating proofs on websites, phones, and NodeJS environments! Head on to the [NoirJS tutorial](../tutorials/noirjs_app.md) to learn how to do that. diff --git a/docs/versioned_docs/version-v0.24.0/how_to/merkle-proof.mdx b/docs/versioned_docs/version-v0.24.0/how_to/merkle-proof.mdx new file mode 100644 index 00000000000..34074659ac1 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/how_to/merkle-proof.mdx @@ -0,0 +1,48 @@ +--- +title: Prove Merkle Tree Membership +description: + Learn how to use merkle membership proof in Noir to prove that a given leaf is a member of a + merkle tree with a specified root, at a given index. +keywords: + [merkle proof, merkle membership proof, Noir, rust, hash function, Pedersen, sha256, merkle tree] +--- + +Let's walk through an example of a merkle membership proof in Noir that proves that a given leaf is +in a merkle tree. + +```rust +use dep::std; + +fn main(message : [Field; 62], index : Field, hashpath : [Field; 40], root : Field) { + let leaf = std::hash::hash_to_field(message); + let merkle_root = std::merkle::compute_merkle_root(leaf, index, hashpath); + assert(merkle_root == root); +} + +``` + +The message is hashed using `hash_to_field`. The specific hash function that is being used is chosen +by the backend. The only requirement is that this hash function can heuristically be used as a +random oracle. If only collision resistance is needed, then one can call `std::hash::pedersen_hash` +instead. + +```rust +let leaf = std::hash::hash_to_field(message); +``` + +The leaf is then passed to a compute_merkle_root function with the root, index and hashpath. 
The returned root can then be asserted to be the same as the provided root. + +```rust +let merkle_root = std::merkle::compute_merkle_root(leaf, index, hashpath); +assert(merkle_root == root); +``` + +> **Note:** It is possible to re-implement the merkle tree without the standard library. +> However, for most use cases, the standard library implementation is enough. In general, the standard library will always opt to be +> as conservative as possible, while striking a balance with efficiency. + +For example, the merkle membership proof only requires a hash function that has collision +resistance, hence a hash function like Pedersen is allowed, which in most cases is more efficient +than the even more conservative sha256. + +[View an example on the starter repo](https://github.com/noir-lang/noir-examples/blob/3ea09545cabfa464124ec2f3ea8e60c608abe6df/stealthdrop/circuits/src/main.nr#L20) diff --git a/docs/versioned_docs/version-v0.24.0/how_to/using-devcontainers.mdx b/docs/versioned_docs/version-v0.24.0/how_to/using-devcontainers.mdx new file mode 100644 index 00000000000..727ec6ca667 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/how_to/using-devcontainers.mdx @@ -0,0 +1,110 @@ +--- +title: Developer Containers and Codespaces +description: "Learn how to set up a devcontainer in your GitHub repository for a seamless coding experience with Codespaces. Follow our easy 8-step guide to create your own Noir environment without installing Nargo locally." +keywords: ["Devcontainer", "Codespaces", "GitHub", "Noir Environment", "Docker Image", "Development Environment", "Remote Coding", "GitHub Codespaces", "Noir Programming", "Nargo", "VSCode Extensions", "Noirup"] +sidebar_position: 1 +--- + +Adding a developer container configuration file to your Noir project is one of the easiest ways to unlock coding in the browser. + +## What's a devcontainer after all? + +A [Developer Container](https://containers.dev/) (devcontainer for short) is a Docker image that comes preloaded with the tools, extensions, and other dependencies you need to quickly get started or continue a project, without having to install Nargo locally. Think of it as a development environment in a box. + +There are many advantages to this: + +- It's platform and architecture agnostic +- You don't need to have an IDE installed, or Nargo, or use a terminal at all +- It's safer for using on a public machine or public network + +One of the best ways of using devcontainers is... not using your machine at all, for maximum control, performance, and ease of use. +Enter Codespaces. + +## Codespaces + +If a devcontainer is just a Docker image, then what stops you from provisioning a `p3dn.24xlarge` AWS EC2 instance with 92 vCPUs and 768 GiB RAM and using it to prove your 10-gate SNARK proof? + +Nothing! Except perhaps the $30-40 per hour it will cost you. + +The problem is that provisioning takes time, and I bet you don't want to see the AWS console every time you want to code something real quick. + +Fortunately, there's an easy and free way to get a decent remote machine ready and loaded in less than 2 minutes: Codespaces.
[Codespaces is a Github feature](https://github.com/features/codespaces) that allows you to code in a remote machine by using devcontainers, and it's pretty cool: + +- You can start coding Noir in less than a minute +- It uses the resources of a remote machine, so you can code on your grandma's phone if needed be +- It makes it easy to share work with your frens +- It's fully reusable, you can stop and restart whenever you need to + +:::info + +Don't take out your wallet just yet. Free GitHub accounts get about [15-60 hours of coding](https://github.com/features/codespaces) for free per month, depending on the size of your provisioned machine. + +::: + +## Tell me it's _actually_ easy + +It is! + +Github comes with a default codespace and you can use it to code your own devcontainer. That's exactly what we will be doing in this guide. + + + +8 simple steps: + +#### 1. Create a new repository on GitHub. + +#### 2. Click "Start coding with Codespaces". This will use the default image. + +#### 3. Create a folder called `.devcontainer` in the root of your repository. + +#### 4. Create a Dockerfile in that folder, and paste the following code: + +```docker +FROM --platform=linux/amd64 node:lts-bookworm-slim +SHELL ["/bin/bash", "-c"] +RUN apt update && apt install -y curl bash git tar gzip libc++-dev +RUN curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +ENV PATH="/root/.nargo/bin:$PATH" +RUN noirup +ENTRYPOINT ["nargo"] +``` +#### 5. Create a file called `devcontainer.json` in the same folder, and paste the following code: + +```json +{ + "name": "Noir on Codespaces", + "build": { + "context": ".", + "dockerfile": "Dockerfile" + }, + "customizations": { + "vscode": { + "extensions": ["noir-lang.vscode-noir"] + } + } +} +``` +#### 6. Commit and push your changes + +This will pull the new image and build it, so it could take a minute or so + +#### 8. Done! +Just wait for the build to finish, and there's your easy Noir environment. + + +Refer to [noir-starter](https://github.com/noir-lang/noir-starter/) as an example of how devcontainers can be used together with codespaces. + + + +## How do I use it? + +Using the codespace is obviously much easier than setting it up. +Just navigate to your repository and click "Code" -> "Open with Codespaces". It should take a few seconds to load, and you're ready to go. + +:::info + +If you really like the experience, you can add a badge to your readme, links to existing codespaces, and more. +Check out the [official docs](https://docs.github.com/en/codespaces/setting-up-your-project-for-codespaces/setting-up-your-repository/facilitating-quick-creation-and-resumption-of-codespaces) for more info. diff --git a/docs/versioned_docs/version-v0.24.0/index.mdx b/docs/versioned_docs/version-v0.24.0/index.mdx new file mode 100644 index 00000000000..75086ddcdde --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/index.mdx @@ -0,0 +1,67 @@ +--- +title: Noir Lang +hide_title: true +description: + Learn about the public alpha release of Noir, a domain specific language heavily influenced by Rust that compiles to + an intermediate language which can be compiled to an arithmetic circuit or a rank-1 constraint system. 
+keywords: + [Noir, + Domain Specific Language, + Rust, + Intermediate Language, + Arithmetic Circuit, + Rank-1 Constraint System, + Ethereum Developers, + Protocol Developers, + Blockchain Developers, + Proving System, + Smart Contract Language] +sidebar_position: 0 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Noir Logo + +Noir is a Domain-Specific Language for SNARK proving systems developed by [Aztec Labs](https://aztec.network/). It allows you to generate complex Zero-Knowledge Programs (ZKP) by using simple and flexible syntax, requiring no previous knowledge on the underlying mathematics or cryptography. + +ZK programs are programs that can generate short proofs of a certain statement without revealing some details about it. You can read more about ZKPs [here](https://dev.to/spalladino/a-beginners-intro-to-coding-zero-knowledge-proofs-c56). + +## What's new about Noir? + +Noir works differently from most ZK languages by taking a two-pronged path. First, it compiles the program to an adaptable intermediate language known as ACIR. From there, depending on a given project's needs, ACIR can be further compiled into an arithmetic circuit for integration with the proving backend. + +:::info + +Noir is backend agnostic, which means it makes no assumptions on which proving backend powers the ZK proof. Being the language that powers [Aztec Contracts](https://docs.aztec.network/developers/contracts/main), it defaults to Aztec's Barretenberg proving backend. + +However, the ACIR output can be transformed to be compatible with other PLONK-based backends, or into a [rank-1 constraint system](https://www.rareskills.io/post/rank-1-constraint-system) suitable for backends such as Arkwork's Marlin. + +::: + +## Who is Noir for? + +Noir can be used both in complex cloud-based backends and in user's smartphones, requiring no knowledge on the underlying math or cryptography. From authorization systems that keep a password in the user's device, to complex on-chain verification of recursive proofs, Noir is designed to abstract away complexity without any significant overhead. Here are some examples of situations where Noir can be used: + + + + Noir Logo + + Aztec Contracts leverage Noir to allow for the storage and execution of private information. Writing an Aztec Contract is as easy as writing Noir, and Aztec developers can easily interact with the network storage and execution through the [Aztec.nr](https://docs.aztec.network/developers/contracts/main) library. + + + Soliditry Verifier Example + Noir can auto-generate Solidity verifier contracts that verify Noir proofs. This allows for non-interactive verification of proofs containing private information in an immutable system. This feature powers a multitude of use-case scenarios, from P2P chess tournaments, to [Aztec Layer-2 Blockchain](https://docs.aztec.network/) + + + Aztec Labs developed NoirJS, an easy interface to generate and verify Noir proofs in a Javascript environment. This allows for Noir to be used in webpages, mobile apps, games, and any other environment supporting JS execution in a standalone manner. + + + + +## Libraries + +Noir is meant to be easy to extend by simply importing Noir libraries just like in Rust. +The [awesome-noir repo](https://github.com/noir-lang/awesome-noir#libraries) is a collection of libraries developed by the Noir community. +Writing a new library is easy and makes code be composable and easy to reuse. 
See the section on [dependencies](noir/modules_packages_crates/dependencies.md) for more information. diff --git a/docs/versioned_docs/version-v0.24.0/migration_notes.md b/docs/versioned_docs/version-v0.24.0/migration_notes.md new file mode 100644 index 00000000000..9f27230a1a0 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/migration_notes.md @@ -0,0 +1,91 @@ +--- +title: Migration notes +description: Read about migration notes from previous versions, which could solve problems while updating +keywords: [Noir, notes, migration, updating, upgrading] +--- + +Noir is in full-speed development. Things break fast, wild, and often. This page attempts to leave some notes on errors you might encounter when upgrading and how to resolve them until proper patches are built. + +## ≥0.19 + +### Enforcing `compiler_version` + +From this version on, the compiler will check for the `compiler_version` field in `Nargo.toml`, and will error if it doesn't match the current Nargo version in use. + +To update, please make sure this field in `Nargo.toml` matches the output of `nargo --version`. + +## ≥0.14 + +The index of the [for loops](noir/concepts/control_flow.md#loops) is now of type `u64` instead of `Field`. An example refactor would be: + +```rust +for i in 0..10 { + let i = i as Field; +} +``` + +## ≥v0.11.0 and Nargo backend + +From this version onwards, Nargo starts managing backends through the `nargo backend` command. Upgrading to the versions per usual steps might lead to: + +### `backend encountered an error` + +This is likely due to the existing locally installed version of proving backend (e.g. barretenberg) is incompatible with the version of Nargo in use. + +To fix the issue: + +1. Uninstall the existing backend + +```bash +nargo backend uninstall acvm-backend-barretenberg +``` + +You may replace _acvm-backend-barretenberg_ with the name of your backend listed in `nargo backend ls` or in ~/.nargo/backends. + +2. Reinstall a compatible version of the proving backend. + +If you are using the default barretenberg backend, simply run: + +``` +nargo prove +``` + +with your Noir program. + +This will trigger the download and installation of the latest version of barretenberg compatible with your Nargo in use. + +### `backend encountered an error: illegal instruction` + +On certain Intel-based systems, an `illegal instruction` error may arise due to incompatibility of barretenberg with certain CPU instructions. + +To fix the issue: + +1. Uninstall the existing backend + +```bash +nargo backend uninstall acvm-backend-barretenberg +``` + +You may replace _acvm-backend-barretenberg_ with the name of your backend listed in `nargo backend ls` or in ~/.nargo/backends. + +2. Reinstall a compatible version of the proving backend. + +If you are using the default barretenberg backend, simply run: + +``` +nargo backend install acvm-backend-barretenberg https://github.com/noir-lang/barretenberg-js-binary/raw/master/run-bb.tar.gz +``` + +This downloads and installs a specific bb.js based version of barretenberg binary from GitHub. + +The gzipped file is running [this bash script](https://github.com/noir-lang/barretenberg-js-binary/blob/master/run-bb-js.sh), where we need to gzip it as the Nargo currently expect the backend to be zipped up. + +Then run: + +``` +DESIRED_BINARY_VERSION=0.8.1 nargo info +``` + +This overrides the bb native binary with a bb.js node application instead, which should be compatible with most if not all hardware. 
This does come with the drawback of being generally slower than native binary. + +0.8.1 indicates bb.js version 0.8.1, so if you change that it will update to a different version or the default version in the script if none was supplied. diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/_category_.json b/docs/versioned_docs/version-v0.24.0/noir/concepts/_category_.json new file mode 100644 index 00000000000..7da08f8a8c5 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Concepts", + "position": 0, + "collapsible": true, + "collapsed": true +} \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/assert.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/assert.md new file mode 100644 index 00000000000..bcff613a695 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/assert.md @@ -0,0 +1,45 @@ +--- +title: Assert Function +description: + Learn about the assert function in Noir, which can be used to explicitly constrain the predicate or + comparison expression that follows to be true, and what happens if the expression is false at + runtime. +keywords: [Noir programming language, assert statement, predicate expression, comparison expression] +sidebar_position: 4 +--- + +Noir includes a special `assert` function which will explicitly constrain the predicate/comparison +expression that follows to be true. If this expression is false at runtime, the program will fail to +be proven. Example: + +```rust +fn main(x : Field, y : Field) { + assert(x == y); +} +``` + +> Assertions only work for predicate operations, such as `==`. If there's any ambiguity on the operation, the program will fail to compile. For example, it is unclear if `assert(x + y)` would check for `x + y == 0` or simply would return `true`. + +You can optionally provide a message to be logged when the assertion fails: + +```rust +assert(x == y, "x and y are not equal"); +``` + +Aside string literals, the optional message can be a format string or any other type supported as input for Noir's [print](../standard_library/logging.md) functions. This feature lets you incorporate runtime variables into your failed assertion logs: + +```rust +assert(x == y, f"Expected x == y, but got {x} == {y}"); +``` + +Using a variable as an assertion message directly: + +```rust +struct myStruct { + myField: Field +} + +let s = myStruct { myField: y }; +assert(s.myField == x, s); +``` + diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/comments.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/comments.md new file mode 100644 index 00000000000..b51a85f5c94 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/comments.md @@ -0,0 +1,33 @@ +--- +title: Comments +description: + Learn how to write comments in Noir programming language. A comment is a line of code that is + ignored by the compiler, but it can be read by programmers. Single-line and multi-line comments + are supported in Noir. +keywords: [Noir programming language, comments, single-line comments, multi-line comments] +sidebar_position: 10 +--- + +A comment is a line in your codebase which the compiler ignores, however it can be read by +programmers. + +Here is a single line comment: + +```rust +// This is a comment and is ignored +``` + +`//` is used to tell the compiler to ignore the rest of the line. + +Noir also supports multi-line block comments. Start a block comment with `/*` and end the block with `*/`. 
+ +Noir does not natively support doc comments. You may be able to use [Rust doc comments](https://doc.rust-lang.org/reference/comments.html) in your code to leverage some Rust documentation build tools with Noir code. + +```rust +/* + This is a block comment describing a complex function. +*/ +fn main(x : Field, y : pub Field) { + assert(x != y); +} +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/control_flow.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/control_flow.md new file mode 100644 index 00000000000..4ce65236db3 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/control_flow.md @@ -0,0 +1,45 @@ +--- +title: Control Flow +description: + Learn how to use loops and if expressions in the Noir programming language. Discover the syntax + and examples for for loops and if-else statements. +keywords: [Noir programming language, loops, for loop, if-else statements, Rust syntax] +sidebar_position: 2 +--- + +## Loops + +Noir has one kind of loop: the `for` loop. `for` loops allow you to repeat a block of code multiple +times. + +The following block of code between the braces is run 10 times. + +```rust +for i in 0..10 { + // do something +}; +``` + +The index for loops is of type `u64`. + +## If Expressions + +Noir supports `if-else` statements. The syntax is most similar to Rust's where it is not required +for the statement's conditional to be surrounded by parentheses. + +```rust +let a = 0; +let mut x: u32 = 0; + +if a == 0 { + if a != 0 { + x = 6; + } else { + x = 2; + } +} else { + x = 5; + assert(x == 5); +} +assert(x == 2); +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_bus.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_bus.md new file mode 100644 index 00000000000..e54fc861257 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_bus.md @@ -0,0 +1,21 @@ +--- +title: Data Bus +sidebar_position: 13 +--- +**Disclaimer** this feature is experimental, do not use it! + +The data bus is an optimization that the backend can use to make recursion more efficient. +In order to use it, you must define some inputs of the program entry points (usually the `main()` +function) with the `call_data` modifier, and the return values with the `return_data` modifier. +These modifiers are incompatible with `pub` and `mut` modifiers. + +## Example + +```rust +fn main(mut x: u32, y: call_data u32, z: call_data [u32;4] ) -> return_data u32 { + let a = z[x]; + a+y +} +``` + +As a result, both call_data and return_data will be treated as private inputs and encapsulated into a read-only array each, for the backend to process. diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/_category_.json b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/_category_.json new file mode 100644 index 00000000000..5d694210bbf --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/arrays.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/arrays.md new file mode 100644 index 00000000000..a8bd338e736 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/arrays.md @@ -0,0 +1,251 @@ +--- +title: Arrays +description: + Dive into the Array data type in Noir. 
Grasp its methods, practical examples, and best practices for efficiently using Arrays in your Noir code. +keywords: + [ + noir, + array type, + methods, + examples, + indexing, + ] +sidebar_position: 4 +--- + +An array is one way of grouping together values into one compound type. Array types can be inferred +or explicitly specified via the syntax `[<Type>; <Size>]`: + +```rust +fn main(x : Field, y : Field) { + let my_arr = [x, y]; + let your_arr: [Field; 2] = [x, y]; +} +``` + +Here, both `my_arr` and `your_arr` are instantiated as an array containing two `Field` elements. + +Array elements can be accessed using indexing: + +```rust +fn main() { + let a = [1, 2, 3, 4, 5]; + + let first = a[0]; + let second = a[1]; +} +``` + +All elements in an array must be of the same type (i.e. homogeneous). That is, an array cannot group +a `Field` value and a `u8` value together, for example. + +You can write mutable arrays, like: + +```rust +fn main() { + let mut arr = [1, 2, 3, 4, 5]; + assert(arr[0] == 1); + + arr[0] = 42; + assert(arr[0] == 42); +} +``` + +You can instantiate a new array of a fixed size with the same value repeated for each element. The following example instantiates an array of length 32 where each element is of type Field and has the value 0. + +```rust +let array: [Field; 32] = [0; 32]; +``` + +Like in Rust, arrays in Noir are a fixed size. However, if you wish to convert an array to a [slice](./slices), you can just call `as_slice` on your array: + +```rust +let array: [Field; 32] = [0; 32]; +let sl = array.as_slice(); +``` + +You can define multidimensional arrays: + +```rust +let array : [[Field; 2]; 2]; +let element = array[0][0]; +``` +However, multidimensional slices are not supported. For example, the following code will error at compile time: +```rust +let slice : [[Field]] = []; +``` + +## Types + +You can create arrays of primitive types or structs. There is not yet support for nested arrays +(arrays of arrays) or arrays of structs that contain arrays. + +## Methods + +For convenience, the STD provides some ready-to-use, common methods for arrays. +Each of these functions is located within the generic impl `impl<T, N> [T; N] {`. +So anywhere `self` appears, it refers to the variable `self: [T; N]`. + +### len + +Returns the length of an array + +```rust +fn len(self) -> Field +``` + +example + +```rust +fn main() { + let array = [42, 42]; + assert(array.len() == 2); +} +``` + +### sort + +Returns a new sorted array. The original array remains untouched. Notice that this function will +only work for arrays of fields or integers, not for any arbitrary type. This is because the sorting +logic it uses internally is optimized specifically for these values. If you need a sort function to +sort any type, you should use the function `sort_via` described below. + +```rust +fn sort(self) -> [T; N] +``` + +example + +```rust +fn main() { + let arr = [42, 32]; + let sorted = arr.sort(); + assert(sorted == [32, 42]); +} +``` + +### sort_via + +Sorts the array with a custom comparison function + +```rust +fn sort_via(self, ordering: fn(T, T) -> bool) -> [T; N] +``` + +example + +```rust +fn main() { + let arr = [42, 32]; + let sorted_ascending = arr.sort_via(|a, b| a < b); + assert(sorted_ascending == [32, 42]); // verifies + + let sorted_descending = arr.sort_via(|a, b| a > b); + assert(sorted_descending == [32, 42]); // does not verify +} +``` + +### map + +Applies a function to each element of the array, returning a new array containing the mapped elements.
+ +```rust +fn map(self, f: fn(T) -> U) -> [U; N] +``` + +example + +```rust +let a = [1, 2, 3]; +let b = a.map(|a| a * 2); // b is now [2, 4, 6] +``` + +### fold + +Applies a function to each element of the array, returning the final accumulated value. The first +parameter is the initial value. + +```rust +fn fold(self, mut accumulator: U, f: fn(U, T) -> U) -> U +``` + +This is a left fold, so the given function will be applied to the accumulator and first element of +the array, then the second, and so on. For a given call the expected result would be equivalent to: + +```rust +let a1 = [1]; +let a2 = [1, 2]; +let a3 = [1, 2, 3]; + +let f = |a, b| a - b; +a1.fold(10, f) //=> f(10, 1) +a2.fold(10, f) //=> f(f(10, 1), 2) +a3.fold(10, f) //=> f(f(f(10, 1), 2), 3) +``` + +example: + +```rust + +fn main() { + let arr = [2, 2, 2, 2, 2]; + let folded = arr.fold(0, |a, b| a + b); + assert(folded == 10); +} + +``` + +### reduce + +Same as fold, but uses the first element as starting element. + +```rust +fn reduce(self, f: fn(T, T) -> T) -> T +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 2]; + let reduced = arr.reduce(|a, b| a + b); + assert(reduced == 10); +} +``` + +### all + +Returns true if all the elements satisfy the given predicate + +```rust +fn all(self, predicate: fn(T) -> bool) -> bool +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 2]; + let all = arr.all(|a| a == 2); + assert(all); +} +``` + +### any + +Returns true if any of the elements satisfy the given predicate + +```rust +fn any(self, predicate: fn(T) -> bool) -> bool +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 5]; + let any = arr.any(|a| a == 5); + assert(any); +} + +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/booleans.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/booleans.md new file mode 100644 index 00000000000..69826fcd724 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/booleans.md @@ -0,0 +1,31 @@ +--- +title: Booleans +description: + Delve into the Boolean data type in Noir. Understand its methods, practical examples, and best practices for using Booleans in your Noir programs. +keywords: + [ + noir, + boolean type, + methods, + examples, + logical operations, + ] +sidebar_position: 2 +--- + + +The `bool` type in Noir has two possible values: `true` and `false`: + +```rust +fn main() { + let t = true; + let f: bool = false; +} +``` + +> **Note:** When returning a boolean value, it will show up as a value of 1 for `true` and 0 for +> `false` in _Verifier.toml_. + +The boolean type is most commonly used in conditionals like `if` expressions and `assert` +statements. More about conditionals is covered in the [Control Flow](../control_flow) and +[Assert Function](../assert) sections. diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/fields.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/fields.md new file mode 100644 index 00000000000..99b4aa63549 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/fields.md @@ -0,0 +1,192 @@ +--- +title: Fields +description: + Dive deep into the Field data type in Noir. Understand its methods, practical examples, and best practices to effectively use Fields in your Noir programs. +keywords: + [ + noir, + field type, + methods, + examples, + best practices, + ] +sidebar_position: 0 +--- + +The field type corresponds to the native field type of the proving backend. 
+ +The size of a Noir field depends on the elliptic curve's finite field for the proving backend +adopted. For example, a field would be a 254-bit integer when paired with the default backend that +spans the Grumpkin curve. + +Fields support integer arithmetic and are often used as the default numeric type in Noir: + +```rust +fn main(x : Field, y : Field) { + let z = x + y; +} +``` + +`x`, `y` and `z` are all private fields in this example. Using the `let` keyword we defined a new +private value `z` constrained to be equal to `x + y`. + +If proving efficiency is of priority, fields should be used as a default for solving problems. +Smaller integer types (e.g. `u64`) incur extra range constraints. + +## Methods + +After declaring a Field, you can use these common methods on it: + +### to_le_bits + +Transforms the field into an array of bits, Little Endian. + +```rust +fn to_le_bits(_x : Field, _bit_size: u32) -> [u1; N] +``` + +example: + +```rust +fn main() { + let field = 2; + let bits = field.to_le_bits(32); +} +``` + +### to_be_bits + +Transforms the field into an array of bits, Big Endian. + +```rust +fn to_be_bits(_x : Field, _bit_size: u32) -> [u1; N] +``` + +example: + +```rust +fn main() { + let field = 2; + let bits = field.to_be_bits(32); +} +``` + +### to_le_bytes + +Transforms into an array of bytes, Little Endian + +```rust +fn to_le_bytes(_x : Field, byte_size: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let bytes = field.to_le_bytes(4); +} +``` + +### to_be_bytes + +Transforms into an array of bytes, Big Endian + +```rust +fn to_be_bytes(_x : Field, byte_size: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let bytes = field.to_be_bytes(4); +} +``` + +### to_le_radix + +Decomposes into a vector over the specified base, Little Endian + +```rust +fn to_le_radix(_x : Field, _radix: u32, _result_len: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let radix = field.to_le_radix(256, 4); +} +``` + +### to_be_radix + +Decomposes into a vector over the specified base, Big Endian + +```rust +fn to_be_radix(_x : Field, _radix: u32, _result_len: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let radix = field.to_be_radix(256, 4); +} +``` + +### pow_32 + +Returns the value to the power of the specified exponent + +```rust +fn pow_32(self, exponent: Field) -> Field +``` + +example: + +```rust +fn main() { + let field = 2 + let pow = field.pow_32(4); + assert(pow == 16); +} +``` + +### assert_max_bit_size + +Adds a constraint to specify that the field can be represented with `bit_size` number of bits + +```rust +fn assert_max_bit_size(self, bit_size: u32) +``` + +example: + +```rust +fn main() { + let field = 2 + field.assert_max_bit_size(32); +} +``` + +### sgn0 + +Parity of (prime) Field element, i.e. sgn0(x mod p) = 0 if x ∈ \{0, ..., p-1\} is even, otherwise sgn0(x mod p) = 1. + +```rust +fn sgn0(self) -> u1 +``` + + +### lt + +Returns true if the field is less than the other field + +```rust +pub fn lt(self, another: Field) -> bool +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/function_types.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/function_types.md new file mode 100644 index 00000000000..f6121af17e2 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/function_types.md @@ -0,0 +1,26 @@ +--- +title: Function types +sidebar_position: 10 +--- + +Noir supports higher-order functions. 
The syntax for a function type is as follows: + +```rust +fn(arg1_type, arg2_type, ...) -> return_type +``` + +Example: + +```rust +fn assert_returns_100(f: fn() -> Field) { // f takes no args and returns a Field + assert(f() == 100); +} + +fn main() { + assert_returns_100(|| 100); // ok + assert_returns_100(|| 150); // fails +} +``` + +A function type also has an optional capture environment - this is necessary to support closures. +See [Lambdas](../lambdas.md) for more details. diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/index.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/index.md new file mode 100644 index 00000000000..3c9cd4c2437 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/index.md @@ -0,0 +1,96 @@ +--- +title: Data Types +description: + Get a clear understanding of the two categories of Noir data types - primitive types and compound + types. Learn about their characteristics, differences, and how to use them in your Noir + programming. +keywords: + [ + noir, + data types, + primitive types, + compound types, + private types, + public types, + ] +--- + +Every value in Noir has a type, which determines which operations are valid for it. + +All values in Noir are fundamentally composed of `Field` elements. For a more approachable +developing experience, abstractions are added on top to introduce different data types in Noir. + +Noir has two category of data types: primitive types (e.g. `Field`, integers, `bool`) and compound +types that group primitive types (e.g. arrays, tuples, structs). Each value can either be private or +public. + +## Private & Public Types + +A **private value** is known only to the Prover, while a **public value** is known by both the +Prover and Verifier. Mark values as `private` when the value should only be known to the prover. All +primitive types (including individual fields of compound types) in Noir are private by default, and +can be marked public when certain values are intended to be revealed to the Verifier. + +> **Note:** For public values defined in Noir programs paired with smart contract verifiers, once +> the proofs are verified on-chain the values can be considered known to everyone that has access to +> that blockchain. + +Public data types are treated no differently to private types apart from the fact that their values +will be revealed in proofs generated. Simply changing the value of a public type will not change the +circuit (where the same goes for changing values of private types as well). + +_Private values_ are also referred to as _witnesses_ sometimes. + +> **Note:** The terms private and public when applied to a type (e.g. `pub Field`) have a different +> meaning than when applied to a function (e.g. `pub fn foo() {}`). +> +> The former is a visibility modifier for the Prover to interpret if a value should be made known to +> the Verifier, while the latter is a visibility modifier for the compiler to interpret if a +> function should be made accessible to external Noir programs like in other languages. + +### pub Modifier + +All data types in Noir are private by default. Types are explicitly declared as public using the +`pub` modifier: + +```rust +fn main(x : Field, y : pub Field) -> pub Field { + x + y +} +``` + +In this example, `x` is **private** while `y` and `x + y` (the return value) are **public**. Note +that visibility is handled **per variable**, so it is perfectly valid to have one input that is +private and another that is public. 
+ +> **Note:** Public types can only be declared through parameters on `main`. + +## Type Aliases + +A type alias is a new name for an existing type. Type aliases are declared with the keyword `type`: + +```rust +type Id = u8; + +fn main() { + let id: Id = 1; + let zero: u8 = 0; + assert(zero + 1 == id); +} +``` + +Type aliases can also be used with [generics](@site/docs/noir/concepts/generics.md): + +```rust +type Id = Size; + +fn main() { + let id: Id = 1; + let zero: u32 = 0; + assert(zero + 1 == id); +} +``` + +### BigInt + +You can achieve BigInt functionality using the [Noir BigInt](https://github.com/shuklaayush/noir-bigint) library. diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/integers.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/integers.md new file mode 100644 index 00000000000..30135d76e4a --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/integers.md @@ -0,0 +1,162 @@ +--- +title: Integers +description: Explore the Integer data type in Noir. Learn about its methods, see real-world examples, and grasp how to efficiently use Integers in your Noir code. +keywords: [noir, integer types, methods, examples, arithmetic] +sidebar_position: 1 +--- + +An integer type is a range constrained field type. The Noir frontend supports arbitrarily-sized, both unsigned and signed integer types. + +:::info + +When an integer is defined in Noir without a specific type, it will default to `Field`. + +The one exception is for loop indices which default to `u64` since comparisons on `Field`s are not possible. + +::: + +## Unsigned Integers + +An unsigned integer type is specified first with the letter `u` (indicating its unsigned nature) followed by its bit size (e.g. `8`): + +```rust +fn main() { + let x: u8 = 1; + let y: u8 = 1; + let z = x + y; + assert (z == 2); +} +``` + +The bit size determines the maximum value the integer type can store. For example, a `u8` variable can store a value in the range of 0 to 255 (i.e. $\\2^{8}-1\\$). + +## Signed Integers + +A signed integer type is specified first with the letter `i` (which stands for integer) followed by its bit size (e.g. `8`): + +```rust +fn main() { + let x: i8 = -1; + let y: i8 = -1; + let z = x + y; + assert (z == -2); +} +``` + +The bit size determines the maximum and minimum range of value the integer type can store. For example, an `i8` variable can store a value in the range of -128 to 127 (i.e. $\\-2^{7}\\$ to $\\2^{7}-1\\$). + +:::tip + +If you are using the default proving backend with Noir, both even (e.g. _u2_, _i2_) and odd (e.g. _u3_, _i3_) arbitrarily-sized integer types up to 127 bits (i.e. _u127_ and _i127_) are supported. + +::: + + +## 128 bits Unsigned Integers + +The built-in structure `U128` allows you to use 128-bit unsigned integers almost like a native integer type. However, there are some differences to keep in mind: +- You cannot cast between a native integer and `U128` +- There is a higher performance cost when using `U128`, compared to a native type. + +Conversion between unsigned integer types and U128 are done through the use of `from_integer` and `to_integer` functions. + +```rust +fn main() { + let x = U128::from_integer(23); + let y = U128::from_hex("0x7"); + let z = x + y; + assert(z.to_integer() == 30); +} +``` + +`U128` is implemented with two 64 bits limbs, representing the low and high bits, which explains the performance cost. 
You should expect `U128` to be twice more costly for addition and four times more costly for multiplication. +You can construct a U128 from its limbs: +```rust +fn main(x: u64, y: u64) { + let x = U128::from_u64s_be(x,y); + assert(z.hi == x as Field); + assert(z.lo == y as Field); +} +``` + +Note that the limbs are stored as Field elements in order to avoid unnecessary conversions. +Apart from this, most operations will work as usual: + +```rust +fn main(x: U128, y: U128) { + // multiplication + let c = x * y; + // addition and subtraction + let c = c - x + y; + // division + let c = x / y; + // bit operation; + let c = x & y | y; + // bit shift + let c = x << y; + // comparisons; + let c = x < y; + let c = x == y; +} +``` + +## Overflows + +Computations that exceed the type boundaries will result in overflow errors. This happens with both signed and unsigned integers. For example, attempting to prove: + +```rust +fn main(x: u8, y: u8) { + let z = x + y; +} +``` + +With: + +```toml +x = "255" +y = "1" +``` + +Would result in: + +``` +$ nargo prove +error: Assertion failed: 'attempt to add with overflow' +┌─ ~/src/main.nr:9:13 +│ +│ let z = x + y; +│ ----- +│ += Call stack: + ... +``` + +A similar error would happen with signed integers: + +```rust +fn main() { + let x: i8 = -118; + let y: i8 = -11; + let z = x + y; +} +``` + +### Wrapping methods + +Although integer overflow is expected to error, some use-cases rely on wrapping. For these use-cases, the standard library provides `wrapping` variants of certain common operations: + +```rust +fn wrapping_add(x: T, y: T) -> T; +fn wrapping_sub(x: T, y: T) -> T; +fn wrapping_mul(x: T, y: T) -> T; +``` + +Example of how it is used: + +```rust +use dep::std; + +fn main(x: u8, y: u8) -> pub u8 { + std::wrapping_add(x, y) +} +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/references.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/references.md new file mode 100644 index 00000000000..a5293d11cfb --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/references.md @@ -0,0 +1,23 @@ +--- +title: References +sidebar_position: 9 +--- + +Noir supports first-class references. References are a bit like pointers: they point to a specific address that can be followed to access the data stored at that address. You can use Rust-like syntax to use pointers in Noir: the `&` operator references the variable, the `*` operator dereferences it. + +Example: + +```rust +fn main() { + let mut x = 2; + + // you can reference x as &mut and pass it to multiplyBy2 + multiplyBy2(&mut x); +} + +// you can access &mut here +fn multiplyBy2(x: &mut Field) { + // and dereference it with * + *x = *x * 2; +} +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/slices.mdx b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/slices.mdx new file mode 100644 index 00000000000..4a6ee816aa2 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/slices.mdx @@ -0,0 +1,147 @@ +--- +title: Slices +description: Explore the Slice data type in Noir. Understand its methods, see real-world examples, and learn how to effectively use Slices in your Noir programs. +keywords: [noir, slice type, methods, examples, subarrays] +sidebar_position: 5 +--- + +import Experimental from '@site/src/components/Notes/_experimental.mdx'; + + + +A slice is a dynamically-sized view into a sequence of elements. 
They can be resized at runtime, but because they don't own the data, they cannot be returned from a circuit. You can treat slices as arrays without a constrained size. + +```rust +use dep::std::slice; + +fn main() -> pub Field { + let mut slice: [Field] = [0; 2]; + + let mut new_slice = slice.push_back(6); + new_slice.len() +} +``` + +View the corresponding test file [here][test-file]. + +[test-file]: https://github.com/noir-lang/noir/blob/f387ec1475129732f72ba294877efdf6857135ac/crates/nargo_cli/tests/test_data_ssa_refactor/slices/src/main.nr + +## Methods + +For convenience, the STD provides some ready-to-use, common methods for slices: + +### push_back + +Pushes a new element to the end of the slice, returning a new slice with a length one greater than the original unmodified slice. + +```rust +fn push_back(_self: [T], _elem: T) -> [T] +``` + +example: + +```rust +fn main() -> pub Field { + let mut slice: [Field] = [0; 2]; + + let mut new_slice = slice.push_back(6); + new_slice.len() +} +``` + +View the corresponding test file [here][test-file]. + +### push_front + +Returns a new array with the specified element inserted at index 0. The existing elements indexes are incremented by 1. + +```rust +fn push_front(_self: Self, _elem: T) -> Self +``` + +Example: + +```rust +let mut new_slice: [Field] = []; +new_slice = new_slice.push_front(20); +assert(new_slice[0] == 20); // returns true +``` + +View the corresponding test file [here][test-file]. + +### pop_front + +Returns a tuple of two items, the first element of the array and the rest of the array. + +```rust +fn pop_front(_self: Self) -> (T, Self) +``` + +Example: + +```rust +let (first_elem, rest_of_slice) = slice.pop_front(); +``` + +View the corresponding test file [here][test-file]. + +### pop_back + +Returns a tuple of two items, the beginning of the array with the last element omitted and the last element. + +```rust +fn pop_back(_self: Self) -> (Self, T) +``` + +Example: + +```rust +let (popped_slice, last_elem) = slice.pop_back(); +``` + +View the corresponding test file [here][test-file]. + +### append + +Loops over a slice and adds it to the end of another. + +```rust +fn append(mut self, other: Self) -> Self +``` + +Example: + +```rust +let append = [1, 2].append([3, 4, 5]); +``` + +### insert + +Inserts an element at a specified index and shifts all following elements by 1. + +```rust +fn insert(_self: Self, _index: Field, _elem: T) -> Self +``` + +Example: + +```rust +new_slice = rest_of_slice.insert(2, 100); +assert(new_slice[2] == 100); +``` + +View the corresponding test file [here][test-file]. + +### remove + +Remove an element at a specified index, shifting all elements after it to the left, returning the altered slice and the removed element. + +```rust +fn remove(_self: Self, _index: Field) -> (Self, T) +``` + +Example: + +```rust +let (remove_slice, removed_elem) = slice.remove(3); +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/strings.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/strings.md new file mode 100644 index 00000000000..311dfd64416 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/strings.md @@ -0,0 +1,80 @@ +--- +title: Strings +description: + Discover the String data type in Noir. Learn about its methods, see real-world examples, and understand how to effectively manipulate and use Strings in Noir. 
+keywords: + [ + noir, + string type, + methods, + examples, + concatenation, + ] +sidebar_position: 3 +--- + + +The string type is a fixed length value defined with `str`. + +You can use strings in `assert()` functions or print them with +`println()`. See more about [Logging](../../standard_library/logging). + +```rust +use dep::std; + +fn main(message : pub str<11>, hex_as_string : str<4>) { + println(message); + assert(message == "hello world"); + assert(hex_as_string == "0x41"); +} +``` + +You can convert a `str` to a byte array by calling `as_bytes()` +or a vector by calling `as_bytes_vec()`. + +```rust +fn main() { + let message = "hello world"; + let message_bytes = message.as_bytes(); + let mut message_vec = message.as_bytes_vec(); + assert(message_bytes.len() == 11); + assert(message_bytes[0] == 104); + assert(message_bytes[0] == message_vec.get(0)); +} +``` + +## Escape characters + +You can use escape characters for your strings: + +| Escape Sequence | Description | +|-----------------|-----------------| +| `\r` | Carriage Return | +| `\n` | Newline | +| `\t` | Tab | +| `\0` | Null Character | +| `\"` | Double Quote | +| `\\` | Backslash | + +Example: + +```rust +let s = "Hello \"world" // prints "Hello "world" +let s = "hey \tyou"; // prints "hey you" +``` + +## Raw strings + +A raw string begins with the letter `r` and is optionally delimited by a number of hashes `#`. + +Escape characters are *not* processed within raw strings. All contents are interpreted literally. + +Example: + +```rust +let s = r"Hello world"; +let s = r#"Simon says "hello world""#; + +// Any number of hashes may be used (>= 1) as long as the string also terminates with the same number of hashes +let s = r#####"One "#, Two "##, Three "###, Four "####, Five will end the string."#####; +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/structs.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/structs.md new file mode 100644 index 00000000000..dbf68c99813 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/structs.md @@ -0,0 +1,70 @@ +--- +title: Structs +description: + Explore the Struct data type in Noir. Learn about its methods, see real-world examples, and grasp how to effectively define and use Structs in your Noir programs. +keywords: + [ + noir, + struct type, + methods, + examples, + data structures, + ] +sidebar_position: 8 +--- + +A struct also allows for grouping multiple values of different types. Unlike tuples, we can also +name each field. + +> **Note:** The usage of _field_ here refers to each element of the struct and is unrelated to the +> field type of Noir. + +Defining a struct requires giving it a name and listing each field within as `: ` pairs: + +```rust +struct Animal { + hands: Field, + legs: Field, + eyes: u8, +} +``` + +An instance of a struct can then be created with actual values in `: ` pairs in any +order. 
Struct fields are accessible using their given names: + +```rust +fn main() { + let legs = 4; + + let dog = Animal { + eyes: 2, + hands: 0, + legs, + }; + + let zero = dog.hands; +} +``` + +Structs can also be destructured in a pattern, binding each field to a new variable: + +```rust +fn main() { + let Animal { hands, legs: feet, eyes } = get_octopus(); + + let ten = hands + feet + eyes as u8; +} + +fn get_octopus() -> Animal { + let octopus = Animal { + hands: 0, + legs: 8, + eyes: 2, + }; + + octopus +} +``` + +The new variables can be bound with names different from the original struct field names, as +showcased in the `legs --> feet` binding in the example above. diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/tuples.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/tuples.md new file mode 100644 index 00000000000..2ec5c9c4113 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/tuples.md @@ -0,0 +1,48 @@ +--- +title: Tuples +description: + Dive into the Tuple data type in Noir. Understand its methods, practical examples, and best practices for efficiently using Tuples in your Noir code. +keywords: + [ + noir, + tuple type, + methods, + examples, + multi-value containers, + ] +sidebar_position: 7 +--- + +A tuple collects multiple values like an array, but with the added ability to collect values of +different types: + +```rust +fn main() { + let tup: (u8, u64, Field) = (255, 500, 1000); +} +``` + +One way to access tuple elements is via destructuring using pattern matching: + +```rust +fn main() { + let tup = (1, 2); + + let (one, two) = tup; + + let three = one + two; +} +``` + +Another way to access tuple elements is via direct member access, using a period (`.`) followed by +the index of the element we want to access. Index `0` corresponds to the first tuple element, `1` to +the second and so on: + +```rust +fn main() { + let tup = (5, 6, 7, 8); + + let five = tup.0; + let eight = tup.3; +} +``` diff --git a/docs/docs/noir/concepts/data_types/vectors.mdx b/docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/vectors.mdx similarity index 100% rename from docs/docs/noir/concepts/data_types/vectors.mdx rename to docs/versioned_docs/version-v0.24.0/noir/concepts/data_types/vectors.mdx diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/distinct.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/distinct.md new file mode 100644 index 00000000000..6c993b8b5e0 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/distinct.md @@ -0,0 +1,64 @@ +--- +title: Distinct Witnesses +sidebar_position: 11 +--- + +The `distinct` keyword prevents repetitions of witness indices in the program's ABI. This ensures +that the witnesses being returned as public inputs are all unique. + +The `distinct` keyword is only used for return values on program entry points (usually the `main()` +function). + +When using `distinct` and `pub` simultaneously, `distinct` comes first. See the example below. + +You can read more about the problem this solves +[here](https://github.com/noir-lang/noir/issues/1183). + +## Example + +Without the `distinct` keyword, the following program + +```rust +fn main(x : pub Field, y : pub Field) -> pub [Field; 4] { + let a = 1; + let b = 1; + [x + 1, y, a, b] +} +``` + +compiles to + +```json +{ + //... + "abi": { + //... 
+ "param_witnesses": { "x": [1], "y": [2] }, + "return_witnesses": [3, 2, 4, 4] + } +} +``` + +Whereas (with the `distinct` keyword) + +```rust +fn main(x : pub Field, y : pub Field) -> distinct pub [Field; 4] { + let a = 1; + let b = 1; + [x + 1, y, a, b] +} +``` + +compiles to + +```json +{ + //... + "abi": { + //... + "param_witnesses": { "x": [1], "y": [2] }, + //... + "return_witnesses": [3, 4, 5, 6] + } +} +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/functions.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/functions.md new file mode 100644 index 00000000000..48aba9cd058 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/functions.md @@ -0,0 +1,226 @@ +--- +title: Functions +description: + Learn how to declare functions and methods in Noir, a programming language with Rust semantics. + This guide covers parameter declaration, return types, call expressions, and more. +keywords: [Noir, Rust, functions, methods, parameter declaration, return types, call expressions] +sidebar_position: 1 +--- + +Functions in Noir follow the same semantics of Rust, though Noir does not support early returns. + +To declare a function the `fn` keyword is used. + +```rust +fn foo() {} +``` + +By default, functions are visible only within the package they are defined. To make them visible outside of that package (for example, as part of a [library](../modules_packages_crates/crates_and_packages.md#libraries)), you should mark them as `pub`: + +```rust +pub fn foo() {} +``` + +You can also restrict the visibility of the function to only the crate it was defined in, by specifying `pub(crate)`: + +```rust +pub(crate) fn foo() {} //foo can only be called within its crate +``` + +All parameters in a function must have a type and all types are known at compile time. The parameter +is pre-pended with a colon and the parameter type. Multiple parameters are separated using a comma. + +```rust +fn foo(x : Field, y : Field){} +``` + +The return type of a function can be stated by using the `->` arrow notation. The function below +states that the foo function must return a `Field`. If the function returns no value, then the arrow +is omitted. + +```rust +fn foo(x : Field, y : Field) -> Field { + x + y +} +``` + +Note that a `return` keyword is unneeded in this case - the last expression in a function's body is +returned. + +## Main function + +If you're writing a binary, the `main` function is the starting point of your program. You can pass all types of expressions to it, as long as they have a fixed size at compile time: + +```rust +fn main(x : Field) // this is fine: passing a Field +fn main(x : [Field; 2]) // this is also fine: passing a Field with known size at compile-time +fn main(x : (Field, bool)) // 👌: passing a (Field, bool) tuple means size 2 +fn main(x : str<5>) // this is fine, as long as you pass a string of size 5 + +fn main(x : Vec) // can't compile, has variable size +fn main(x : [Field]) // can't compile, has variable size +fn main(....// i think you got it by now +``` + +Keep in mind [tests](../../getting_started/tooling/testing.md) don't differentiate between `main` and any other function. The following snippet passes tests, but won't compile or prove: + +```rust +fn main(x : [Field]) { + assert(x[0] == 1); +} + +#[test] +fn test_one() { + main([1, 2]); +} +``` + +```bash +$ nargo test +[testing] Running 1 test functions +[testing] Testing test_one... ok +[testing] All tests passed + +$ nargo check +The application panicked (crashed). 
+Message: Cannot have variable sized arrays as a parameter to main +``` + +## Call Expressions + +Calling a function in Noir is executed by using the function name and passing in the necessary +arguments. + +Below we show how to call the `foo` function from the `main` function using a call expression: + +```rust +fn main(x : Field, y : Field) { + let z = foo(x); +} + +fn foo(x : Field) -> Field { + x + x +} +``` + +## Methods + +You can define methods in Noir on any struct type in scope. + +```rust +struct MyStruct { + foo: Field, + bar: Field, +} + +impl MyStruct { + fn new(foo: Field) -> MyStruct { + MyStruct { + foo, + bar: 2, + } + } + + fn sum(self) -> Field { + self.foo + self.bar + } +} + +fn main() { + let s = MyStruct::new(40); + assert(s.sum() == 42); +} +``` + +Methods are just syntactic sugar for functions, so if we wanted to we could also call `sum` as +follows: + +```rust +assert(MyStruct::sum(s) == 42); +``` + +It is also possible to specialize which method is chosen depending on the [generic](./generics.md) type that is used. In this example, the `foo` function returns different values depending on its type: + +```rust +struct Foo {} + +impl Foo { + fn foo(self) -> Field { 1 } +} + +impl Foo { + fn foo(self) -> Field { 2 } +} + +fn main() { + let f1: Foo = Foo{}; + let f2: Foo = Foo{}; + assert(f1.foo() + f2.foo() == 3); +} +``` + +Also note that impls with the same method name defined in them cannot overlap. For example, if we already have `foo` defined for `Foo` and `Foo` like we do above, we cannot also define `foo` in an `impl Foo` since it would be ambiguous which version of `foo` to choose. + +```rust +// Including this impl in the same project as the above snippet would +// cause an overlapping impls error +impl Foo { + fn foo(self) -> Field { 3 } +} +``` + +## Lambdas + +Lambdas are anonymous functions. They follow the syntax of Rust - `|arg1, arg2, ..., argN| return_expression`. + +```rust +let add_50 = |val| val + 50; +assert(add_50(100) == 150); +``` + +See [Lambdas](./lambdas.md) for more details. + +## Attributes + +Attributes are metadata that can be applied to a function, using the following syntax: `#[attribute(value)]`. + +Supported attributes include: + +- **builtin**: the function is implemented by the compiler, for efficiency purposes. +- **deprecated**: mark the function as _deprecated_. Calling the function will generate a warning: `warning: use of deprecated function` +- **field**: Used to enable conditional compilation of code depending on the field size. See below for more details +- **oracle**: mark the function as _oracle_; meaning it is an external unconstrained function, implemented in noir_js. See [Unconstrained](./unconstrained.md) and [NoirJS](../../reference/NoirJS/noir_js/index.md) for more details. +- **test**: mark the function as unit tests. See [Tests](../../getting_started/tooling/testing.md) for more details + +### Field Attribute + +The field attribute defines which field the function is compatible for. The function is conditionally compiled, under the condition that the field attribute matches the Noir native field. +The field can be defined implicitly, by using the name of the elliptic curve usually associated to it - for instance bn254, bls12_381 - or explicitly by using the field (prime) order, in decimal or hexadecimal form. +As a result, it is possible to define multiple versions of a function with each version specialized for a different field attribute. 
This can be useful when a function requires different parameters depending on the underlying elliptic curve. + +Example: we define the function `foo()` three times below. Once for the default Noir bn254 curve, once for the field $\mathbb F_{23}$, which will normally never be used by Noir, and once again for the bls12_381 curve. + +```rust +#[field(bn254)] +fn foo() -> u32 { + 1 +} + +#[field(23)] +fn foo() -> u32 { + 2 +} + +// This commented code would not compile as foo would be defined twice because it is the same field as bn254 +// #[field(21888242871839275222246405745257275088548364400416034343698204186575808495617)] +// fn foo() -> u32 { +// 2 +// } + +#[field(bls12_381)] +fn foo() -> u32 { + 3 +} +``` + +If the field name is not known to Noir, it will discard the function. Field names are case insensitive. diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/generics.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/generics.md new file mode 100644 index 00000000000..ddd42bf1f9b --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/generics.md @@ -0,0 +1,106 @@ +--- +title: Generics +description: Learn how to use Generics in Noir +keywords: [Noir, Rust, generics, functions, structs] +sidebar_position: 7 +--- + +Generics allow you to use the same functions with multiple different concrete data types. You can +read more about the concept of generics in the Rust documentation +[here](https://doc.rust-lang.org/book/ch10-01-syntax.html). + +Here is a trivial example showing the identity function that supports any type. In Rust, it is +common to refer to the most general type as `T`. We follow the same convention in Noir. + +```rust +fn id(x: T) -> T { + x +} +``` + +## In Structs + +Generics are useful for specifying types in structs. For example, we can specify that a field in a +struct will be of a certain generic type. In this case `value` is of type `T`. + +```rust +struct RepeatedValue { + value: T, + count: Field, +} + +impl RepeatedValue { + fn print(self) { + for _i in 0 .. self.count { + println(self.value); + } + } +} + +fn main() { + let repeated = RepeatedValue { value: "Hello!", count: 2 }; + repeated.print(); +} +``` + +The `print` function will print `Hello!` an arbitrary number of times, twice in this case. + +If we want to be generic over array lengths (which are type-level integers), we can use numeric +generics. Using these looks just like using regular generics, but these generics can resolve to +integers at compile-time, rather than resolving to types. Here's an example of a struct that is +generic over the size of the array it contains internally: + +```rust +struct BigInt { + limbs: [u32; N], +} + +impl BigInt { + // `N` is in scope of all methods in the impl + fn first(first: BigInt, second: BigInt) -> Self { + assert(first.limbs != second.limbs); + first + + fn second(first: BigInt, second: Self) -> Self { + assert(first.limbs != second.limbs); + second + } +} +``` + +## Calling functions on generic parameters + +Since a generic type `T` can represent any type, how can we call functions on the underlying type? +In other words, how can we go from "any type `T`" to "any type `T` that has certain methods available?" + +This is what [traits](../concepts/traits) are for in Noir. 
Here's an example of a function generic over +any type `T` that implements the `Eq` trait for equality: + +```rust +fn first_element_is_equal(array1: [T; N], array2: [T; N]) -> bool + where T: Eq +{ + if (array1.len() == 0) | (array2.len() == 0) { + true + } else { + array1[0] == array2[0] + } +} + +fn main() { + assert(first_element_is_equal([1, 2, 3], [1, 5, 6])); + + // We can use first_element_is_equal for arrays of any type + // as long as we have an Eq impl for the types we pass in + let array = [MyStruct::new(), MyStruct::new()]; + assert(array_eq(array, array, MyStruct::eq)); +} + +impl Eq for MyStruct { + fn eq(self, other: MyStruct) -> bool { + self.foo == other.foo + } +} +``` + +You can find more details on traits and trait implementations on the [traits page](../concepts/traits). diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/globals.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/globals.md new file mode 100644 index 00000000000..063a3d89248 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/globals.md @@ -0,0 +1,72 @@ +--- +title: Global Variables +description: + Learn about global variables in Noir. Discover how + to declare, modify, and use them in your programs. +keywords: [noir programming language, globals, global variables, constants] +sidebar_position: 8 +--- + +## Globals + + +Noir supports global variables. The global's type can be inferred by the compiler entirely: + +```rust +global N = 5; // Same as `global N: Field = 5` + +global TUPLE = (3, 2); + +fn main() { + assert(N == 5); + assert(N == TUPLE.0 + TUPLE.1); +} +``` + +:::info + +Globals can be defined as any expression, so long as they don't depend on themselves - otherwise there would be a dependency cycle! For example: + +```rust +global T = foo(T); // dependency error +``` + +::: + + +If they are initialized to a literal integer, globals can be used to specify an array's length: + +```rust +global N: Field = 2; + +fn main(y : [Field; N]) { + assert(y[0] == y[1]) +} +``` + +A global from another module can be imported or referenced externally like any other name: + +```rust +global N = 20; + +fn main() { + assert(my_submodule::N != N); +} + +mod my_submodule { + global N: Field = 10; +} +``` + +When a global is used, Noir replaces the name with its definition on each occurrence. +This means globals defined using function calls will repeat the call each time they're used: + +```rust +global RESULT = foo(); + +fn foo() -> [Field; 100] { ... } +``` + +This is usually fine since Noir will generally optimize any function call that does not +refer to a program input into a constant. It should be kept in mind however, if the called +function performs side-effects like `println`, as these will still occur on each use. diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/lambdas.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/lambdas.md new file mode 100644 index 00000000000..be3c7e0b5ca --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/lambdas.md @@ -0,0 +1,81 @@ +--- +title: Lambdas +description: Learn how to use anonymous functions in Noir programming language. +keywords: [Noir programming language, lambda, closure, function, anonymous function] +sidebar_position: 9 +--- + +## Introduction + +Lambdas are anonymous functions. The syntax is `|arg1, arg2, ..., argN| return_expression`. 
+ +```rust +let add_50 = |val| val + 50; +assert(add_50(100) == 150); +``` + +A block can be used as the body of a lambda, allowing you to declare local variables inside it: + +```rust +let cool = || { + let x = 100; + let y = 100; + x + y +} + +assert(cool() == 200); +``` + +## Closures + +Inside the body of a lambda, you can use variables defined in the enclosing function. Such lambdas are called **closures**. In this example `x` is defined inside `main` and is accessed from within the lambda: + +```rust +fn main() { + let x = 100; + let closure = || x + 150; + assert(closure() == 250); +} +``` + +## Passing closures to higher-order functions + +It may catch you by surprise that the following code fails to compile: + +```rust +fn foo(f: fn () -> Field) -> Field { + f() +} + +fn main() { + let (x, y) = (50, 50); + assert(foo(|| x + y) == 100); // error :( +} +``` + +The reason is that the closure's capture environment affects its type - we have a closure that captures two Fields and `foo` +expects a regular function as an argument - those are incompatible. +:::note + +Variables contained within the `||` are the closure's parameters, and the expression that follows it is the closure's body. The capture environment is comprised of any variables used in the closure's body that are not parameters. + +E.g. in |x| x + y, y would be a captured variable, but x would not be, since it is a parameter of the closure. + +::: +The syntax for the type of a closure is `fn[env](args) -> ret_type`, where `env` is the capture environment of the closure - +in this example that's `(Field, Field)`. + +The best solution in our case is to make `foo` generic over the environment type of its parameter, so that it can be called +with closures with any environment, as well as with regular functions: + +```rust +fn foo(f: fn[Env]() -> Field) -> Field { + f() +} + +fn main() { + let (x, y) = (50, 50); + assert(foo(|| x + y) == 100); // compiles fine + assert(foo(|| 60) == 60); // compiles fine +} +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/mutability.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/mutability.md new file mode 100644 index 00000000000..fdeef6a87c5 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/mutability.md @@ -0,0 +1,121 @@ +--- +title: Mutability +description: + Learn about mutable variables in Noir. Discover how + to declare, modify, and use them in your programs. +keywords: [noir programming language, mutability in noir, mutable variables] +sidebar_position: 8 +--- + +Variables in noir can be declared mutable via the `mut` keyword. Mutable variables can be reassigned +to via an assignment expression. + +```rust +let x = 2; +x = 3; // error: x must be mutable to be assigned to + +let mut y = 3; +let y = 4; // OK +``` + +The `mut` modifier can also apply to patterns: + +```rust +let (a, mut b) = (1, 2); +a = 11; // error: a must be mutable to be assigned to +b = 12; // OK + +let mut (c, d) = (3, 4); +c = 13; // OK +d = 14; // OK + +// etc. +let MyStruct { x: mut y } = MyStruct { x: a }; +// y is now in scope +``` + +Note that mutability in noir is local and everything is passed by value, so if a called function +mutates its parameters then the parent function will keep the old value of the parameters. 
+ +```rust +fn main() -> pub Field { + let x = 3; + helper(x); + x // x is still 3 +} + +fn helper(mut x: i32) { + x = 4; +} +``` + +## Non-local mutability + +Non-local mutability can be achieved through the mutable reference type `&mut T`: + +```rust +fn set_to_zero(x: &mut Field) { + *x = 0; +} + +fn main() { + let mut y = 42; + set_to_zero(&mut y); + assert(*y == 0); +} +``` + +When creating a mutable reference, the original variable being referred to (`y` in this +example) must also be mutable. Since mutable references are a reference type, they must +be explicitly dereferenced via `*` to retrieve the underlying value. Note that this yields +a copy of the value, so mutating this copy will not change the original value behind the +reference: + +```rust +fn main() { + let mut x = 1; + let x_ref = &mut x; + + let mut y = *x_ref; + let y_ref = &mut y; + + x = 2; + *x_ref = 3; + + y = 4; + *y_ref = 5; + + assert(x == 3); + assert(*x_ref == 3); + assert(y == 5); + assert(*y_ref == 5); +} +``` + +Note that types in Noir are actually deeply immutable so the copy that occurs when +dereferencing is only a conceptual copy - no additional constraints will occur. + +Mutable references can also be stored within structs. Note that there is also +no lifetime parameter on these unlike rust. This is because the allocated memory +always lasts the entire program - as if it were an array of one element. + +```rust +struct Foo { + x: &mut Field +} + +impl Foo { + fn incr(mut self) { + *self.x += 1; + } +} + +fn main() { + let foo = Foo { x: &mut 0 }; + foo.incr(); + assert(*foo.x == 1); +} +``` + +In general, you should avoid non-local & shared mutability unless it is needed. Sticking +to only local mutability will improve readability and potentially improve compiler optimizations as well. diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/ops.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/ops.md new file mode 100644 index 00000000000..60425cb8994 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/ops.md @@ -0,0 +1,98 @@ +--- +title: Logical Operations +description: + Learn about the supported arithmetic and logical operations in the Noir programming language. + Discover how to perform operations on private input types, integers, and booleans. +keywords: + [ + Noir programming language, + supported operations, + arithmetic operations, + logical operations, + predicate operators, + bitwise operations, + short-circuiting, + backend, + ] +sidebar_position: 3 +--- + +# Operations + +## Table of Supported Operations + +| Operation | Description | Requirements | +| :-------- | :------------------------------------------------------------: | -------------------------------------: | +| + | Adds two private input types together | Types must be private input | +| - | Subtracts two private input types together | Types must be private input | +| \* | Multiplies two private input types together | Types must be private input | +| / | Divides two private input types together | Types must be private input | +| ^ | XOR two private input types together | Types must be integer | +| & | AND two private input types together | Types must be integer | +| \| | OR two private input types together | Types must be integer | +| \<\< | Left shift an integer by another integer amount | Types must be integer | +| >> | Right shift an integer by another integer amount | Types must be integer | +| ! 
| Bitwise not of a value | Type must be integer or boolean | +| \< | returns a bool if one value is less than the other | Upper bound must have a known bit size | +| \<= | returns a bool if one value is less than or equal to the other | Upper bound must have a known bit size | +| > | returns a bool if one value is more than the other | Upper bound must have a known bit size | +| >= | returns a bool if one value is more than or equal to the other | Upper bound must have a known bit size | +| == | returns a bool if one value is equal to the other | Both types must not be constants | +| != | returns a bool if one value is not equal to the other | Both types must not be constants | + +### Predicate Operators + +`<,<=, !=, == , >, >=` are known as predicate/comparison operations because they compare two values. +This differs from the operations such as `+` where the operands are used in _computation_. + +### Bitwise Operations Example + +```rust +fn main(x : Field) { + let y = x as u32; + let z = y & y; +} +``` + +`z` is implicitly constrained to be the result of `y & y`. The `&` operand is used to denote bitwise +`&`. + +> `x & x` would not compile as `x` is a `Field` and not an integer type. + +### Logical Operators + +Noir has no support for the logical operators `||` and `&&`. This is because encoding the +short-circuiting that these operators require can be inefficient for Noir's backend. Instead you can +use the bitwise operators `|` and `&` which operate identically for booleans, just without the +short-circuiting. + +```rust +let my_val = 5; + +let mut flag = 1; +if (my_val > 6) | (my_val == 0) { + flag = 0; +} +assert(flag == 1); + +if (my_val != 10) & (my_val < 50) { + flag = 0; +} +assert(flag == 0); +``` + +### Shorthand operators + +Noir shorthand operators for most of the above operators, namely `+=, -=, *=, /=, %=, &=, |=, ^=, <<=`, and `>>=`. These allow for more concise syntax. For example: + +```rust +let mut i = 0; +i = i + 1; +``` + +could be written as: + +```rust +let mut i = 0; +i += 1; +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/oracles.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/oracles.md new file mode 100644 index 00000000000..2e6a6818d48 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/oracles.md @@ -0,0 +1,23 @@ +--- +title: Oracles +description: Dive into how Noir supports Oracles via RPC calls, and learn how to declare an Oracle in Noir with our comprehensive guide. +keywords: + - Noir + - Oracles + - RPC Calls + - Unconstrained Functions + - Programming + - Blockchain +sidebar_position: 6 +--- + +Noir has support for Oracles via RPC calls. This means Noir will make an RPC call and use the return value for proof generation. + +Since Oracles are not resolved by Noir, they are [`unconstrained` functions](./unconstrained.md) + +You can declare an Oracle through the `#[oracle()]` flag. Example: + +```rust +#[oracle(get_number_sequence)] +unconstrained fn get_number_sequence(_size: Field) -> [Field] {} +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/shadowing.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/shadowing.md new file mode 100644 index 00000000000..5ce6130d201 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/shadowing.md @@ -0,0 +1,44 @@ +--- +title: Shadowing +sidebar_position: 12 +--- + +Noir allows for inheriting variables' values and re-declaring them with the same name similar to Rust, known as shadowing. 
+ +For example, the following function is valid in Noir: + +```rust +fn main() { + let x = 5; + + { + let x = x * 2; + assert (x == 10); + } + + assert (x == 5); +} +``` + +In this example, a variable x is first defined with the value 5. + +The local scope that follows shadows the original x, i.e. creates a local mutable x based on the value of the original x. It is given a value of 2 times the original x. + +When we return to the main scope, x once again refers to just the original x, which stays at the value of 5. + +## Temporal mutability + +One way that shadowing is useful, in addition to ergonomics across scopes, is for temporarily mutating variables. + +```rust +fn main() { + let age = 30; + // age = age + 5; // Would error as `age` is immutable by default. + + let mut age = age + 5; // Temporarily mutates `age` with a new value. + + let age = age; // Locks `age`'s mutability again. + + assert (age == 35); +} +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/traits.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/traits.md new file mode 100644 index 00000000000..ef1445a5907 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/traits.md @@ -0,0 +1,389 @@ +--- +title: Traits +description: + Traits in Noir can be used to abstract out a common interface for functions across + several data types. +keywords: [noir programming language, traits, interfaces, generic, protocol] +sidebar_position: 14 +--- + +## Overview + +Traits in Noir are a useful abstraction similar to interfaces or protocols in other languages. Each trait defines +the interface of several methods contained within the trait. Types can then implement this trait by providing +implementations for these methods. For example in the program: + +```rust +struct Rectangle { + width: Field, + height: Field, +} + +impl Rectangle { + fn area(self) -> Field { + self.width * self.height + } +} + +fn log_area(r: Rectangle) { + println(r.area()); +} +``` + +We have a function `log_area` to log the area of a `Rectangle`. Now how should we change the program if we want this +function to work on `Triangle`s as well?: + +```rust +struct Triangle { + width: Field, + height: Field, +} + +impl Triangle { + fn area(self) -> Field { + self.width * self.height / 2 + } +} +``` + +Making `log_area` generic over all types `T` would be invalid since not all types have an `area` method. Instead, we can +introduce a new `Area` trait and make `log_area` generic over all types `T` that implement `Area`: + +```rust +trait Area { + fn area(self) -> Field; +} + +fn log_area(shape: T) where T: Area { + println(shape.area()); +} +``` + +We also need to explicitly implement `Area` for `Rectangle` and `Triangle`. We can do that by changing their existing +impls slightly. Note that the parameter types and return type of each of our `area` methods must match those defined +by the `Area` trait. + +```rust +impl Area for Rectangle { + fn area(self) -> Field { + self.width * self.height + } +} + +impl Area for Triangle { + fn area(self) -> Field { + self.width * self.height / 2 + } +} +``` + +Now we have a working program that is generic over any type of Shape that is used! Others can even use this program +as a library with their own types - such as `Circle` - as long as they also implement `Area` for these types. + +## Where Clauses + +As seen in `log_area` above, when we want to create a function or method that is generic over any type that implements +a trait, we can add a where clause to the generic function. 
+ +```rust +fn log_area(shape: T) where T: Area { + println(shape.area()); +} +``` + +It is also possible to apply multiple trait constraints on the same variable at once by combining traits with the `+` +operator. Similarly, we can have multiple trait constraints by separating each with a comma: + +```rust +fn foo(elements: [T], thing: U) where + T: Default + Add + Eq, + U: Bar, +{ + let mut sum = T::default(); + + for element in elements { + sum += element; + } + + if sum == T::default() { + thing.bar(); + } +} +``` + +## Generic Implementations + +You can add generics to a trait implementation by adding the generic list after the `impl` keyword: + +```rust +trait Second { + fn second(self) -> Field; +} + +impl Second for (T, Field) { + fn second(self) -> Field { + self.1 + } +} +``` + +You can also implement a trait for every type this way: + +```rust +trait Debug { + fn debug(self); +} + +impl Debug for T { + fn debug(self) { + println(self); + } +} + +fn main() { + 1.debug(); +} +``` + +### Generic Trait Implementations With Where Clauses + +Where clauses can also be placed on trait implementations themselves to restrict generics in a similar way. +For example, while `impl Foo for T` implements the trait `Foo` for every type, `impl Foo for T where T: Bar` +will implement `Foo` only for types that also implement `Bar`. This is often used for implementing generic types. +For example, here is the implementation for array equality: + +```rust +impl Eq for [T; N] where T: Eq { + // Test if two arrays have the same elements. + // Because both arrays must have length N, we know their lengths already match. + fn eq(self, other: Self) -> bool { + let mut result = true; + + for i in 0 .. self.len() { + // The T: Eq constraint is needed to call == on the array elements here + result &= self[i] == other[i]; + } + + result + } +} +``` + +## Generic Traits + +Traits themselves can also be generic by placing the generic arguments after the trait name. These generics are in +scope of every item within the trait. + +```rust +trait Into { + // Convert `self` to type `T` + fn into(self) -> T; +} +``` + +When implementing generic traits the generic arguments of the trait must be specified. This is also true anytime +when referencing a generic trait (e.g. in a `where` clause). + +```rust +struct MyStruct { + array: [Field; 2], +} + +impl Into<[Field; 2]> for MyStruct { + fn into(self) -> [Field; 2] { + self.array + } +} + +fn as_array(x: T) -> [Field; 2] + where T: Into<[Field; 2]> +{ + x.into() +} + +fn main() { + let array = [1, 2]; + let my_struct = MyStruct { array }; + + assert_eq(as_array(my_struct), array); +} +``` + +## Trait Methods With No `self` + +A trait can contain any number of methods, each of which have access to the `Self` type which represents each type +that eventually implements the trait. Similarly, the `self` variable is available as well but is not required to be used. +For example, we can define a trait to create a default value for a type. This trait will need to return the `Self` type +but doesn't need to take any parameters: + +```rust +trait Default { + fn default() -> Self; +} +``` + +Implementing this trait can be done similarly to any other trait: + +```rust +impl Default for Field { + fn default() -> Field { + 0 + } +} + +struct MyType {} + +impl Default for MyType { + fn default() -> Field { + MyType {} + } +} +``` + +However, since there is no `self` parameter, we cannot call it via the method call syntax `object.method()`. 
+Instead, we'll need to refer to the function directly. This can be done either by referring to the +specific impl `MyType::default()` or referring to the trait itself `Default::default()`. In the later +case, type inference determines the impl that is selected. + +```rust +let my_struct = MyStruct::default(); + +let x: Field = Default::default(); +let result = x + Default::default(); +``` + +:::warning + +```rust +let _ = Default::default(); +``` + +If type inference cannot select which impl to use because of an ambiguous `Self` type, an impl will be +arbitrarily selected. This occurs most often when the result of a trait function call with no parameters +is unused. To avoid this, when calling a trait function with no `self` or `Self` parameters or return type, +always refer to it via the implementation type's namespace - e.g. `MyType::default()`. +This is set to change to an error in future Noir versions. + +::: + +## Default Method Implementations + +A trait can also have default implementations of its methods by giving a body to the desired functions. +Note that this body must be valid for all types that may implement the trait. As a result, the only +valid operations on `self` will be operations valid for any type or other operations on the trait itself. + +```rust +trait Numeric { + fn add(self, other: Self) -> Self; + + // Default implementation of double is (self + self) + fn double(self) -> Self { + self.add(self) + } +} +``` + +When implementing a trait with default functions, a type may choose to implement only the required functions: + +```rust +impl Numeric for Field { + fn add(self, other: Field) -> Field { + self + other + } +} +``` + +Or it may implement the optional methods as well: + +```rust +impl Numeric for u32 { + fn add(self, other: u32) -> u32 { + self + other + } + + fn double(self) -> u32 { + self * 2 + } +} +``` + +## Impl Specialization + +When implementing traits for a generic type it is possible to implement the trait for only a certain combination +of generics. This can be either as an optimization or because those specific generics are required to implement the trait. + +```rust +trait Sub { + fn sub(self, other: Self) -> Self; +} + +struct NonZero { + value: T, +} + +impl Sub for NonZero { + fn sub(self, other: Self) -> Self { + let value = self.value - other.value; + assert(value != 0); + NonZero { value } + } +} +``` + +## Overlapping Implementations + +Overlapping implementations are disallowed by Noir to ensure Noir's decision on which impl to select is never ambiguous. +This means if a trait `Foo` is already implemented +by a type `Bar` for all `T`, then we cannot also have a separate impl for `Bar` (or any other +type argument). Similarly, if there is an impl for all `T` such as `impl Debug for T`, we cannot create +any more impls to `Debug` for other types since it would be ambiguous which impl to choose for any given +method call. + +```rust +trait Trait {} + +// Previous impl defined here +impl Trait for (A, B) {} + +// error: Impl for type `(Field, Field)` overlaps with existing impl +impl Trait for (Field, Field) {} +``` + +## Trait Coherence + +Another restriction on trait implementations is coherence. This restriction ensures other crates cannot create +impls that may overlap with other impls, even if several unrelated crates are used as dependencies in the same +program. + +The coherence restriction is: to implement a trait, either the trait itself or the object type must be declared +in the crate the impl is in. 
+ +In practice this often comes up when using types provided by libraries. If a library provides a type `Foo` that does +not implement a trait in the standard library such as `Default`, you may not `impl Default for Foo` in your own crate. +While restrictive, this prevents later issues or silent changes in the program if the `Foo` library later added its +own impl for `Default`. If you are a user of the `Foo` library in this scenario and need a trait not implemented by the +library your choices are to either submit a patch to the library or use the newtype pattern. + +### The Newtype Pattern + +The newtype pattern gets around the coherence restriction by creating a new wrapper type around the library type +that we cannot create `impl`s for. Since the new wrapper type is defined in our current crate, we can create +impls for any trait we need on it. + +```rust +struct Wrapper { + foo: dep::some_library::Foo, +} + +impl Default for Wrapper { + fn default() -> Wrapper { + Wrapper { + foo: dep::some_library::Foo::new(), + } + } +} +``` + +Since we have an impl for our own type, the behavior of this code will not change even if `some_library` is updated +to provide its own `impl Default for Foo`. The downside of this pattern is that it requires extra wrapping and +unwrapping of values when converting to and from the `Wrapper` and `Foo` types. diff --git a/docs/versioned_docs/version-v0.24.0/noir/concepts/unconstrained.md b/docs/versioned_docs/version-v0.24.0/noir/concepts/unconstrained.md new file mode 100644 index 00000000000..89d12c1c971 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/concepts/unconstrained.md @@ -0,0 +1,95 @@ +--- +title: Unconstrained Functions +description: "Learn about what unconstrained functions in Noir are, how to use them and when you'd want to." + +keywords: [Noir programming language, unconstrained, open] +sidebar_position: 5 +--- + +Unconstrained functions are functions which do not constrain any of the included computation and allow for non-deterministic computation. + +## Why? + +Zero-knowledge (ZK) domain-specific languages (DSL) enable developers to generate ZK proofs from their programs by compiling code down to the constraints of an NP complete language (such as R1CS or PLONKish languages). However, the hard bounds of a constraint system can be very limiting to the functionality of a ZK DSL. + +Enabling a circuit language to perform unconstrained execution is a powerful tool. Said another way, unconstrained execution lets developers generate witnesses from code that does not generate any constraints. Being able to execute logic outside of a circuit is critical for both circuit performance and constructing proofs on information that is external to a circuit. + +Fetching information from somewhere external to a circuit can also be used to enable developers to improve circuit efficiency. + +A ZK DSL does not just prove computation, but proves that some computation was handled correctly. Thus, it is necessary that when we switch from performing some operation directly inside of a circuit to inside of an unconstrained environment that the appropriate constraints are still laid down elsewhere in the circuit. + +## Example + +An in depth example might help drive the point home. This example comes from the excellent [post](https://discord.com/channels/1113924620781883405/1124022445054111926/1128747641853972590) by Tom in the Noir Discord. + +Let's look at how we can optimize a function to turn a `u72` into an array of `u8`s. 
+ +```rust +fn main(num: u72) -> pub [u8; 8] { + let mut out: [u8; 8] = [0; 8]; + for i in 0..8 { + out[i] = (num >> (56 - (i * 8)) as u72 & 0xff) as u8; + } + + out +} +``` + +``` +Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 91 +Backend circuit size: 3619 +``` + +A lot of the operations in this function are optimized away by the compiler (all the bit-shifts turn into divisions by constants). However we can save a bunch of gates by casting to u8 a bit earlier. This automatically truncates the bit-shifted value to fit in a u8 which allows us to remove the AND against 0xff. This saves us ~480 gates in total. + +```rust +fn main(num: u72) -> pub [u8; 8] { + let mut out: [u8; 8] = [0; 8]; + for i in 0..8 { + out[i] = (num >> (56 - (i * 8)) as u8; + } + + out +} +``` + +``` +Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 75 +Backend circuit size: 3143 +``` + +Those are some nice savings already but we can do better. This code is all constrained so we're proving every step of calculating out using num, but we don't actually care about how we calculate this, just that it's correct. This is where brillig comes in. + +It turns out that truncating a u72 into a u8 is hard to do inside a snark, each time we do as u8 we lay down 4 ACIR opcodes which get converted into multiple gates. It's actually much easier to calculate num from out than the other way around. All we need to do is multiply each element of out by a constant and add them all together, both relatively easy operations inside a snark. + +We can then run u72_to_u8 as unconstrained brillig code in order to calculate out, then use that result in our constrained function and assert that if we were to do the reverse calculation we'd get back num. This looks a little like the below: + +```rust +fn main(num: u72) -> pub [u8; 8] { + let out = u72_to_u8(num); + + let mut reconstructed_num: u72 = 0; + for i in 0..8 { + reconstructed_num += (out[i] as u72 << (56 - (8 * i))); + } + assert(num == reconstructed_num); + out +} + +unconstrained fn u72_to_u8(num: u72) -> [u8; 8] { + let mut out: [u8; 8] = [0; 8]; + for i in 0..8 { + out[i] = (num >> (56 - (i * 8))) as u8; + } + out +} +``` + +``` +Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 78 +Backend circuit size: 2902 +``` + +This ends up taking off another ~250 gates from our circuit! We've ended up with more ACIR opcodes than before but they're easier for the backend to prove (resulting in fewer gates). + +Generally we want to use brillig whenever there's something that's easy to verify but hard to compute within the circuit. For example, if you wanted to calculate a square root of a number it'll be a much better idea to calculate this in brillig and then assert that if you square the result you get back your number. 
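+
+As a minimal sketch of that pattern (the helper name and the naive search below are illustrative, not part of the standard library), an integer square root can be computed in an unconstrained function and then verified with a single multiplication in the circuit:
+
+```rust
+fn main(x: u16) -> pub u16 {
+    // Computed in brillig, so the search inside sqrt_hint adds no constraints.
+    let root = sqrt_hint(x);
+    // The only constrained work: check the cheap inverse relation.
+    // (x is assumed to be a perfect square; otherwise proving fails.)
+    assert(root * root == x);
+    root
+}
+
+unconstrained fn sqrt_hint(x: u16) -> u16 {
+    // A naive search is fine here because unconstrained code costs no gates.
+    let mut root: u16 = 0;
+    for candidate in 0..256 {
+        let c = candidate as u16;
+        if c * c == x {
+            root = c;
+        }
+    }
+    root
+}
+```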
diff --git a/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/_category_.json b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/_category_.json new file mode 100644 index 00000000000..1debcfe7675 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Modules, Packages and Crates", + "position": 2, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/crates_and_packages.md b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/crates_and_packages.md new file mode 100644 index 00000000000..760a463094c --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/crates_and_packages.md @@ -0,0 +1,43 @@ +--- +title: Crates and Packages +description: Learn how to use Crates and Packages in your Noir project +keywords: [Nargo, dependencies, package management, crates, package] +sidebar_position: 0 +--- + +## Crates + +A crate is the smallest amount of code that the Noir compiler considers at a time. +Crates can contain modules, and the modules may be defined in other files that get compiled with the crate, as we’ll see in the coming sections. + +### Crate Types + +A Noir crate can come in several forms: binaries, libraries or contracts. + +#### Binaries + +_Binary crates_ are programs which you can compile to an ACIR circuit which you can then create proofs against. Each must have a function called `main` that defines the ACIR circuit which is to be proved. + +#### Libraries + +_Library crates_ don't have a `main` function and they don't compile down to ACIR. Instead they define functionality intended to be shared with multiple projects, and eventually included in a binary crate. + +#### Contracts + +Contract crates are similar to binary crates in that they compile to ACIR which you can create proofs against. They are different in that they do not have a single `main` function, but are a collection of functions to be deployed to the [Aztec network](https://aztec.network). You can learn more about the technical details of Aztec in the [monorepo](https://github.com/AztecProtocol/aztec-packages) or contract [examples](https://github.com/AztecProtocol/aztec-packages/tree/master/yarn-project/noir-contracts/contracts). + +### Crate Root + +Every crate has a root, which is the source file that the compiler starts, this is also known as the root module. The Noir compiler does not enforce any conditions on the name of the file which is the crate root, however if you are compiling via Nargo the crate root must be called `lib.nr` or `main.nr` for library or binary crates respectively. + +## Packages + +A Nargo _package_ is a collection of one of more crates that provides a set of functionality. A package must include a Nargo.toml file. + +A package _must_ contain either a library or a binary crate, but not both. + +### Differences from Cargo Packages + +One notable difference between Rust's Cargo and Noir's Nargo is that while Cargo allows a package to contain an unlimited number of binary crates and a single library crate, Nargo currently only allows a package to contain a single crate. + +In future this restriction may be lifted to allow a Nargo package to contain both a binary and library crate or multiple binary crates. 
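+
+For reference, every package is described by its `Nargo.toml` manifest. A minimal manifest for a binary package looks roughly like the following (the field values are illustrative):
+
+```toml
+# Nargo.toml
+
+[package]
+name = "my_program"
+type = "bin"              # "lib" for a library crate, "contract" for a contract crate
+authors = [""]
+compiler_version = ">=0.24.0"
+
+[dependencies]
+```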
diff --git a/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/dependencies.md b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/dependencies.md new file mode 100644 index 00000000000..a37dc401b7d --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/dependencies.md @@ -0,0 +1,124 @@ +--- +title: Dependencies +description: + Learn how to specify and manage dependencies in Nargo, allowing you to upload packages to GitHub + and use them easily in your project. +keywords: [Nargo, dependencies, GitHub, package management, versioning] +sidebar_position: 1 +--- + +Nargo allows you to upload packages to GitHub and use them as dependencies. + +## Specifying a dependency + +Specifying a dependency requires a tag to a specific commit and the git url to the url containing +the package. + +Currently, there are no requirements on the tag contents. If requirements are added, it would follow +semver 2.0 guidelines. + +> Note: Without a `tag` , there would be no versioning and dependencies would change each time you +> compile your project. + +For example, to add the [ecrecover-noir library](https://github.com/colinnielsen/ecrecover-noir) to your project, add it to `Nargo.toml`: + +```toml +# Nargo.toml + +[dependencies] +ecrecover = {tag = "v0.8.0", git = "https://github.com/colinnielsen/ecrecover-noir"} +``` + +If the module is in a subdirectory, you can define a subdirectory in your git repository, for example: + +```toml +# Nargo.toml + +[dependencies] +easy_private_token_contract = {tag ="v0.1.0-alpha62", git = "https://github.com/AztecProtocol/aztec-packages", directory = "yarn-project/noir-contracts/contracts/easy_private_token_contract"} +``` + +## Specifying a local dependency + +You can also specify dependencies that are local to your machine. + +For example, this file structure has a library and binary crate + +```tree +├── binary_crate +│   ├── Nargo.toml +│   └── src +│   └── main.nr +└── lib_a + ├── Nargo.toml + └── src + └── lib.nr +``` + +Inside of the binary crate, you can specify: + +```toml +# Nargo.toml + +[dependencies] +lib_a = { path = "../lib_a" } +``` + +## Importing dependencies + +You can import a dependency to a Noir file using the following syntax. For example, to import the +ecrecover-noir library and local lib_a referenced above: + +```rust +use dep::ecrecover; +use dep::lib_a; +``` + +You can also import only the specific parts of dependency that you want to use, like so: + +```rust +use dep::std::hash::sha256; +use dep::std::scalar_mul::fixed_base_embedded_curve; +``` + +Lastly, as demonstrated in the +[elliptic curve example](../standard_library/cryptographic_primitives/ec_primitives#examples), you +can import multiple items in the same line by enclosing them in curly braces: + +```rust +use dep::std::ec::tecurve::affine::{Curve, Point}; +``` + +We don't have a way to consume libraries from inside a [workspace](./workspaces) as external dependencies right now. + +Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. + +## Dependencies of Dependencies + +Note that when you import a dependency, you also get access to all of the dependencies of that package. + +For example, the [phy_vector](https://github.com/resurgencelabs/phy_vector) library imports an [fraction](https://github.com/resurgencelabs/fraction) library. 
If you're importing the phy_vector library, then you can access the functions in fractions library like so: + +```rust +use dep::phy_vector; + +fn main(x : Field, y : pub Field) { + //... + let f = phy_vector::fraction::toFraction(true, 2, 1); + //... +} +``` + +## Available Libraries + +Noir does not currently have an official package manager. You can find a list of available Noir libraries in the [awesome-noir repo here](https://github.com/noir-lang/awesome-noir#libraries). + +Some libraries that are available today include: + +- [Standard Library](https://github.com/noir-lang/noir/tree/master/noir_stdlib) - the Noir Standard Library +- [Ethereum Storage Proof Verification](https://github.com/aragonzkresearch/noir-trie-proofs) - a library that contains the primitives necessary for RLP decoding (in the form of look-up table construction) and Ethereum state and storage proof verification (or verification of any trie proof involving 32-byte long keys) +- [BigInt](https://github.com/shuklaayush/noir-bigint) - a library that provides a custom BigUint56 data type, allowing for computations on large unsigned integers +- [ECrecover](https://github.com/colinnielsen/ecrecover-noir/tree/main) - a library to verify an ECDSA signature and return the source Ethereum address +- [Sparse Merkle Tree Verifier](https://github.com/vocdoni/smtverifier-noir/tree/main) - a library for verification of sparse Merkle trees +- [Signed Int](https://github.com/resurgencelabs/signed_int) - a library for accessing a custom Signed Integer data type, allowing access to negative numbers on Noir +- [Fraction](https://github.com/resurgencelabs/fraction) - a library for accessing fractional number data type in Noir, allowing results that aren't whole numbers diff --git a/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/modules.md b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/modules.md new file mode 100644 index 00000000000..ae822a1cff4 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/modules.md @@ -0,0 +1,105 @@ +--- +title: Modules +description: + Learn how to organize your files using modules in Noir, following the same convention as Rust's + module system. Examples included. +keywords: [Noir, Rust, modules, organizing files, sub-modules] +sidebar_position: 2 +--- + +Noir's module system follows the same convention as the _newer_ version of Rust's module system. + +## Purpose of Modules + +Modules are used to organize files. Without modules all of your code would need to live in a single +file. In Noir, the compiler does not automatically scan all of your files to detect modules. This +must be done explicitly by the developer. + +## Examples + +### Importing a module in the crate root + +Filename : `src/main.nr` + +```rust +mod foo; + +fn main() { + foo::hello_world(); +} +``` + +Filename : `src/foo.nr` + +```rust +fn from_foo() {} +``` + +In the above snippet, the crate root is the `src/main.nr` file. The compiler sees the module +declaration `mod foo` which prompts it to look for a foo.nr file. + +Visually this module hierarchy looks like the following : + +``` +crate + ├── main + │ + └── foo + └── from_foo + +``` + +### Importing a module throughout the tree + +All modules are accessible from the `crate::` namespace. + +``` +crate + ├── bar + ├── foo + └── main + +``` + +In the above snippet, if `bar` would like to use functions in `foo`, it can do so by `use crate::foo::function_name`. 
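+
+For instance, a minimal sketch of such an import (reusing `from_foo` from the earlier example):
+
+```rust
+// src/bar.nr
+use crate::foo::from_foo;
+
+fn call_foo() {
+    from_foo();
+}
+```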
+ +### Sub-modules + +Filename : `src/main.nr` + +```rust +mod foo; + +fn main() { + foo::from_foo(); +} +``` + +Filename : `src/foo.nr` + +```rust +mod bar; +fn from_foo() {} +``` + +Filename : `src/foo/bar.nr` + +```rust +fn from_bar() {} +``` + +In the above snippet, we have added an extra module to the module tree; `bar`. `bar` is a submodule +of `foo` hence we declare bar in `foo.nr` with `mod bar`. Since `foo` is not the crate root, the +compiler looks for the file associated with the `bar` module in `src/foo/bar.nr` + +Visually the module hierarchy looks as follows: + +``` +crate + ├── main + │ + └── foo + ├── from_foo + └── bar + └── from_bar +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/workspaces.md new file mode 100644 index 00000000000..67a1dafa372 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/workspaces.md @@ -0,0 +1,40 @@ +--- +title: Workspaces +sidebar_position: 3 +--- + +Workspaces are a feature of nargo that allow you to manage multiple related Noir packages in a single repository. A workspace is essentially a group of related projects that share common build output directories and configurations. + +Each Noir project (with it's own Nargo.toml file) can be thought of as a package. Each package is expected to contain exactly one "named circuit", being the "name" defined in Nargo.toml with the program logic defined in `./src/main.nr`. + +For a project with the following structure: + +```tree +├── crates +│   ├── a +│   │   ├── Nargo.toml +│   │   └── src +│   │   └── main.nr +│   └── b +│   ├── Nargo.toml +│   └── src +│   └── main.nr +├── Nargo.toml +└── Prover.toml +``` + +You can define a workspace in Nargo.toml like so: + +```toml +[workspace] +members = ["crates/a", "crates/b"] +default-member = "crates/a" +``` + +`members` indicates which packages are included in the workspace. As such, all member packages of a workspace will be processed when the `--workspace` flag is used with various commands or if a `default-member` is not specified. + +`default-member` indicates which package various commands process by default. + +Libraries can be defined in a workspace. Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. + +Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/_category_.json b/docs/versioned_docs/version-v0.24.0/noir/standard_library/_category_.json new file mode 100644 index 00000000000..af04c0933fd --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Standard Library", + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/black_box_fns.md b/docs/versioned_docs/version-v0.24.0/noir/standard_library/black_box_fns.md new file mode 100644 index 00000000000..eae8744abf0 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/black_box_fns.md @@ -0,0 +1,31 @@ +--- +title: Black Box Functions +description: Black box functions are functions in Noir that rely on backends implementing support for specialized constraints. +keywords: [noir, black box functions] +--- + +Black box functions are functions in Noir that rely on backends implementing support for specialized constraints. 
This makes certain zk-snark unfriendly computations cheaper than if they were implemented in Noir. + +The ACVM spec defines a set of blackbox functions which backends will be expected to implement. This allows backends to use optimized implementations of these constraints if they have them, however they may also fallback to less efficient naive implementations if not. + +## Function list + +Here is a list of the current black box functions: + +- [SHA256](./cryptographic_primitives/hashes#sha256) +- [Schnorr signature verification](./cryptographic_primitives/schnorr) +- [Blake2s](./cryptographic_primitives/hashes#blake2s) +- [Blake3](./cryptographic_primitives/hashes#blake3) +- [Pedersen Hash](./cryptographic_primitives/hashes#pedersen_hash) +- [Pedersen Commitment](./cryptographic_primitives/hashes#pedersen_commitment) +- [ECDSA signature verification](./cryptographic_primitives/ecdsa_sig_verification) +- [Fixed base scalar multiplication](./cryptographic_primitives/scalar) +- AND +- XOR +- RANGE +- [Keccak256](./cryptographic_primitives/hashes#keccak256) +- [Recursive proof verification](./recursion) + +Most black box functions are included as part of the Noir standard library, however `AND`, `XOR` and `RANGE` are used as part of the Noir language syntax. For instance, using the bitwise operator `&` will invoke the `AND` black box function. + +You can view the black box functions defined in the ACVM code [here](https://github.com/noir-lang/noir/blob/master/acvm-repo/acir/src/circuit/black_box_functions.rs). diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/bn254.md b/docs/versioned_docs/version-v0.24.0/noir/standard_library/bn254.md new file mode 100644 index 00000000000..3294f005dbb --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/bn254.md @@ -0,0 +1,46 @@ +--- +title: Bn254 Field Library +--- + +Noir provides a module in standard library with some optimized functions for bn254 Fr in `std::field::bn254`. + +## decompose + +```rust +fn decompose(x: Field) -> (Field, Field) {} +``` + +Decomposes a single field into two fields, low and high. The low field contains the lower 16 bytes of the input field and the high field contains the upper 16 bytes of the input field. Both field results are range checked to 128 bits. + + +## assert_gt + +```rust +fn assert_gt(a: Field, b: Field) {} +``` + +Asserts that a > b. This will generate less constraints than using `assert(gt(a, b))`. + +## assert_lt + +```rust +fn assert_lt(a: Field, b: Field) {} +``` + +Asserts that a < b. This will generate less constraints than using `assert(lt(a, b))`. + +## gt + +```rust +fn gt(a: Field, b: Field) -> bool {} +``` + +Returns true if a > b. + +## lt + +```rust +fn lt(a: Field, b: Field) -> bool {} +``` + +Returns true if a < b. 
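+
+A short usage sketch combining these functions (the variable names are illustrative):
+
+```rust
+use dep::std::field::bn254;
+
+fn main(a: Field, b: pub Field) {
+    // Generates fewer constraints than assert(bn254::lt(a, b)).
+    bn254::assert_lt(a, b);
+
+    // Split `a` into its lower and upper 16 bytes; both limbs are range checked to 128 bits.
+    let (low, high) = bn254::decompose(a);
+    // `a` fits in 128 bits exactly when its high limb is zero.
+    assert((low == a) == (high == 0));
+}
+```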
\ No newline at end of file diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/_category_.json b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/_category_.json new file mode 100644 index 00000000000..5d694210bbf --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/ec_primitives.md b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/ec_primitives.md new file mode 100644 index 00000000000..d2b42d67b7c --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/ec_primitives.md @@ -0,0 +1,102 @@ +--- +title: Elliptic Curve Primitives +keywords: [cryptographic primitives, Noir project] +sidebar_position: 4 +--- + +Data structures and methods on them that allow you to carry out computations involving elliptic +curves over the (mathematical) field corresponding to `Field`. For the field currently at our +disposal, applications would involve a curve embedded in BN254, e.g. the +[Baby Jubjub curve](https://eips.ethereum.org/EIPS/eip-2494). + +## Data structures + +### Elliptic curve configurations + +(`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::Curve`), i.e. the specific elliptic +curve you want to use, which would be specified using any one of the methods +`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::new` which take the coefficients in the +defining equation together with a generator point as parameters. You can find more detail in the +comments in +[`noir_stdlib/src/ec.nr`](https://github.com/noir-lang/noir/blob/master/noir_stdlib/src/ec.nr), but +the gist of it is that the elliptic curves of interest are usually expressed in one of the standard +forms implemented here (Twisted Edwards, Montgomery and Short Weierstraß), and in addition to that, +you could choose to use `affine` coordinates (Cartesian coordinates - the usual (x,y) - possibly +together with a point at infinity) or `curvegroup` coordinates (some form of projective coordinates +requiring more coordinates but allowing for more efficient implementations of elliptic curve +operations). Conversions between all of these forms are provided, and under the hood these +conversions are done whenever an operation is more efficient in a different representation (or a +mixed coordinate representation is employed). + +### Points + +(`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::Point`), i.e. points lying on the +elliptic curve. For a curve configuration `c` and a point `p`, it may be checked that `p` +does indeed lie on `c` by calling `c.contains(p1)`. + +## Methods + +(given a choice of curve representation, e.g. use `std::ec::tecurve::affine::Curve` and use +`std::ec::tecurve::affine::Point`) + +- The **zero element** is given by `Point::zero()`, and we can verify whether a point `p: Point` is + zero by calling `p.is_zero()`. +- **Equality**: Points `p1: Point` and `p2: Point` may be checked for equality by calling + `p1.eq(p2)`. +- **Addition**: For `c: Curve` and points `p1: Point` and `p2: Point` on the curve, adding these two + points is accomplished by calling `c.add(p1,p2)`. +- **Negation**: For a point `p: Point`, `p.negate()` is its negation. 
+
+- **Subtraction**: For `c` and `p1`, `p2` as above, subtracting `p2` from `p1` is accomplished by
+  calling `c.subtract(p1,p2)`.
+- **Scalar multiplication**: For `c` as above, `p: Point` a point on the curve and `n: Field`,
+  scalar multiplication is given by `c.mul(n,p)`. If instead `n: [u1; N]`, i.e. `n` is a bit
+  array, the `bit_mul` method may be used instead: `c.bit_mul(n,p)`.
+- **Multi-scalar multiplication**: For `c` as above and arrays `n: [Field; N]` and `p: [Point; N]`,
+  multi-scalar multiplication is given by `c.msm(n,p)`.
+- **Coordinate representation conversions**: The `into_group` method converts a point or curve
+  configuration in the affine representation to one in the CurveGroup representation, and
+  `into_affine` goes in the other direction.
+- **Curve representation conversions**: `tecurve` and `montcurve` curves and points are equivalent
+  and may be converted between one another by calling `into_montcurve` or `into_tecurve` on their
+  configurations or points. `swcurve` is more general and a curve `c` of one of the other two types
+  may be converted to this representation by calling `c.into_swcurve()`, whereas a point `p` lying
+  on the curve given by `c` may be mapped to its corresponding `swcurve` point by calling
+  `c.map_into_swcurve(p)`.
+- **Map-to-curve methods**: The Elligator 2 method of mapping a field element `n: Field` into a
+  `tecurve` or `montcurve` with configuration `c` may be called as `c.elligator2_map(n)`. For all of
+  the curve configurations, the SWU map-to-curve method may be called as `c.swu_map(z,n)`, where
+  `z: Field` depends on `Field` and `c` and must be chosen by the user (the conditions it needs to
+  satisfy are specified in the comments
+  [here](https://github.com/noir-lang/noir/blob/master/noir_stdlib/src/ec.nr)).
+
+## Examples
+
+The
+[ec_baby_jubjub test](https://github.com/noir-lang/noir/blob/master/test_programs/compile_success_empty/ec_baby_jubjub/src/main.nr)
+illustrates all of the above primitives on various forms of the Baby Jubjub curve. A couple more
+interesting examples in Noir would be:
+
+Public-key cryptography: Given an elliptic curve and a 'base point' on it, determine the public key
+from the private key. This is a matter of using scalar multiplication. In the case of Baby Jubjub,
+for example, this code would do:
+
+```rust
+use dep::std::ec::tecurve::affine::{Curve, Point};
+
+fn bjj_pub_key(priv_key: Field) -> Point {
+    let bjj = Curve::new(168700, 168696, Point::new(995203441582195749578291179787384436505546430278305826713579947235728471134, 5472060717959818805561601436314318772137091100104008585924551046643952123905));
+
+    let base_pt = Point::new(5299619240641551281634865583518297030282874472190772894086521144482721001553, 16950150798460657717958625567821834550301663161624707787222815936182638968203);
+
+    bjj.mul(priv_key, base_pt)
+}
+```
+
+This would come in handy in a Merkle proof.
+
+- EdDSA signature verification: This is a matter of combining these primitives with a suitable hash
+  function. See
+  [feat(stdlib): EdDSA sig verification noir#1136](https://github.com/noir-lang/noir/pull/1136) for
+  the case of Baby Jubjub and the Poseidon hash function.
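+
+As a smaller, self-contained sketch of the point arithmetic itself (reusing the Baby Jubjub parameters from the example above; the assertion is only illustrative):
+
+```rust
+use dep::std::ec::tecurve::affine::{Curve, Point};
+
+fn main() {
+    // Baby Jubjub in Twisted Edwards affine form, with its generator point.
+    let bjj = Curve::new(168700, 168696, Point::new(995203441582195749578291179787384436505546430278305826713579947235728471134, 5472060717959818805561601436314318772137091100104008585924551046643952123905));
+
+    let g = Point::new(5299619240641551281634865583518297030282874472190772894086521144482721001553, 16950150798460657717958625567821834550301663161624707787222815936182638968203);
+
+    // Adding a point to itself and multiplying it by 2 must agree.
+    let doubled = bjj.add(g, g);
+    let two_g = bjj.mul(2, g);
+    assert(doubled.eq(two_g));
+}
+```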
diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx new file mode 100644 index 00000000000..4bf09cef178 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx @@ -0,0 +1,60 @@ +--- +title: ECDSA Signature Verification +description: Learn about the cryptographic primitives regarding ECDSA over the secp256k1 and secp256r1 curves +keywords: [cryptographic primitives, Noir project, ecdsa, secp256k1, secp256r1, signatures] +sidebar_position: 3 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +Noir supports ECDSA signatures verification over the secp256k1 and secp256r1 curves. + +## ecdsa_secp256k1::verify_signature + +Verifier for ECDSA Secp256k1 signatures + +```rust title="ecdsa_secp256k1" showLineNumbers +pub fn verify_signature( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8; N] +) -> bool +``` +> Source code: noir_stdlib/src/ecdsa_secp256k1.nr#L2-L9 + + +example: + +```rust +fn main(hashed_message : [u8;32], pub_key_x : [u8;32], pub_key_y : [u8;32], signature : [u8;64]) { + let valid_signature = std::ecdsa_secp256k1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message); + assert(valid_signature); +} +``` + +## ecdsa_secp256r1::verify_signature + +Verifier for ECDSA Secp256r1 signatures + +```rust title="ecdsa_secp256r1" showLineNumbers +pub fn verify_signature( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8; N] +) -> bool +``` +> Source code: noir_stdlib/src/ecdsa_secp256r1.nr#L2-L9 + + +example: + +```rust +fn main(hashed_message : [u8;32], pub_key_x : [u8;32], pub_key_y : [u8;32], signature : [u8;64]) { + let valid_signature = std::ecdsa_secp256r1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message); + assert(valid_signature); +} +``` + + diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/eddsa.mdx b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/eddsa.mdx new file mode 100644 index 00000000000..a9c10da6c06 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/eddsa.mdx @@ -0,0 +1,18 @@ +--- +title: EdDSA Verification +description: Learn about the cryptographic primitives regarding EdDSA +keywords: [cryptographic primitives, Noir project, eddsa, signatures] +sidebar_position: 5 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## eddsa::eddsa_poseidon_verify + +Verifier for EdDSA signatures + +```rust +fn eddsa_poseidon_verify(public_key_x : Field, public_key_y : Field, signature_s: Field, signature_r8_x: Field, signature_r8_y: Field, message: Field) -> bool +``` + + diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/hashes.mdx b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/hashes.mdx new file mode 100644 index 00000000000..730b6d4117f --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/hashes.mdx @@ -0,0 +1,234 @@ +--- +title: Hash methods +description: + Learn about the cryptographic primitives ready to use for any Noir project, including sha256, + blake2s, pedersen, mimc_bn254 and mimc 
+keywords: + [cryptographic primitives, Noir project, sha256, blake2s, pedersen, mimc_bn254, mimc, hash] +sidebar_position: 0 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## sha256 + +Given an array of bytes, returns the resulting sha256 hash. + +```rust title="sha256" showLineNumbers +pub fn sha256(input: [u8; N]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L5-L7 + + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::sha256(x); +} +``` + + + +## blake2s + +Given an array of bytes, returns an array with the Blake2 hash + +```rust title="blake2s" showLineNumbers +pub fn blake2s(input: [u8; N]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L11-L13 + + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::blake2s(x); +} +``` + + + +## blake3 + +Given an array of bytes, returns an array with the Blake3 hash + +```rust title="blake3" showLineNumbers +pub fn blake3(input: [u8; N]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L17-L19 + + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::blake3(x); +} +``` + + + +## pedersen_hash + +Given an array of Fields, returns the Pedersen hash. + +```rust title="pedersen_hash" showLineNumbers +pub fn pedersen_hash(input: [Field; N]) -> Field +``` +> Source code: noir_stdlib/src/hash.nr#L42-L44 + + +example: + +```rust title="pedersen-hash" showLineNumbers +use dep::std; + +fn main(x: Field, y: Field, expected_hash: Field) { + let hash = std::hash::pedersen_hash([x, y]); + assert_eq(hash, expected_hash); +} +``` +> Source code: test_programs/execution_success/pedersen_hash/src/main.nr#L1-L8 + + + + + +## pedersen_commitment + +Given an array of Fields, returns the Pedersen commitment. + +```rust title="pedersen_commitment" showLineNumbers +struct PedersenPoint { + x : Field, + y : Field, +} + +pub fn pedersen_commitment(input: [Field; N]) -> PedersenPoint +``` +> Source code: noir_stdlib/src/hash.nr#L22-L29 + + +example: + +```rust title="pedersen-commitment" showLineNumbers +use dep::std; + +fn main(x: Field, y: Field, expected_commitment: std::hash::PedersenPoint) { + let commitment = std::hash::pedersen_commitment([x, y]); + assert_eq(commitment.x, expected_commitment.x); + assert_eq(commitment.y, expected_commitment.y); +} +``` +> Source code: test_programs/execution_success/pedersen_commitment/src/main.nr#L1-L9 + + + + +## keccak256 + +Given an array of bytes (`u8`), returns the resulting keccak hash as an array of 32 bytes +(`[u8; 32]`). Specify a message_size to hash only the first `message_size` bytes +of the input. 
+ +```rust title="keccak256" showLineNumbers +pub fn keccak256(input: [u8; N], message_size: u32) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L67-L69 + + +example: + +```rust title="keccak256" showLineNumbers +use dep::std; + +fn main(x: Field, result: [u8; 32]) { + // We use the `as` keyword here to denote the fact that we want to take just the first byte from the x Field + // The padding is taken care of by the program + let digest = std::hash::keccak256([x as u8], 1); + assert(digest == result); + + //#1399: variable message size + let message_size = 4; + let hash_a = std::hash::keccak256([1, 2, 3, 4], message_size); + let hash_b = std::hash::keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size); + + assert(hash_a == hash_b); + + let message_size_big = 8; + let hash_c = std::hash::keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size_big); + + assert(hash_a != hash_c); +} +``` +> Source code: test_programs/execution_success/keccak256/src/main.nr#L1-L22 + + + + +## poseidon + +Given an array of Fields, returns a new Field with the Poseidon Hash. Mind that you need to specify +how many inputs are there to your Poseidon function. + +```rust +// example for hash_1, hash_2 accepts an array of length 2, etc +fn hash_1(input: [Field; 1]) -> Field +``` + +example: + +```rust title="poseidon" showLineNumbers +use dep::std::hash::poseidon; + +fn main(x1: [Field; 2], y1: pub Field, x2: [Field; 4], y2: pub Field) { + let hash1 = poseidon::bn254::hash_2(x1); + assert(hash1 == y1); + + let hash2 = poseidon::bn254::hash_4(x2); + assert(hash2 == y2); +} +``` +> Source code: test_programs/execution_success/poseidon_bn254_hash/src/main.nr#L1-L11 + + +## mimc_bn254 and mimc + +`mimc_bn254` is `mimc`, but with hardcoded parameters for the BN254 curve. You can use it by +providing an array of Fields, and it returns a Field with the hash. You can use the `mimc` method if +you're willing to input your own constants: + +```rust +fn mimc(x: Field, k: Field, constants: [Field; N], exp : Field) -> Field +``` + +otherwise, use the `mimc_bn254` method: + +```rust +fn mimc_bn254(array: [Field; N]) -> Field +``` + +example: + +```rust + +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::mimc::mimc_bn254(x); +} +``` + +## hash_to_field + +```rust +fn hash_to_field(_input : [Field; N]) -> Field {} +``` + +Calculates the `blake2s` hash of the inputs and returns the hash modulo the field modulus to return +a value which can be represented as a `Field`. + diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/index.md b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/index.md new file mode 100644 index 00000000000..650f30165d5 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/index.md @@ -0,0 +1,14 @@ +--- +title: Cryptographic Primitives +description: + Learn about the cryptographic primitives ready to use for any Noir project +keywords: + [ + cryptographic primitives, + Noir project, + ] +--- + +The Noir team is progressively adding new cryptographic primitives to the standard library. Reach out for news or if you would be interested in adding more of these calculations in Noir. + +Some methods are available thanks to the Aztec backend, not being performed using Noir. When using other backends, these methods may or may not be supplied. 
diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/scalar.mdx b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/scalar.mdx new file mode 100644 index 00000000000..df411ca5443 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/scalar.mdx @@ -0,0 +1,33 @@ +--- +title: Scalar multiplication +description: See how you can perform scalar multiplications over a fixed base in Noir +keywords: [cryptographic primitives, Noir project, scalar multiplication] +sidebar_position: 1 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## scalar_mul::fixed_base_embedded_curve + +Performs scalar multiplication over the embedded curve whose coordinates are defined by the +configured noir field. For the BN254 scalar field, this is BabyJubJub or Grumpkin. + +```rust title="fixed_base_embedded_curve" showLineNumbers +pub fn fixed_base_embedded_curve( + low: Field, + high: Field +) -> [Field; 2] +``` +> Source code: noir_stdlib/src/scalar_mul.nr#L27-L32 + + +example + +```rust +fn main(x : Field) { + let scal = std::scalar_mul::fixed_base_embedded_curve(x); + println(scal); +} +``` + + diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/schnorr.mdx b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/schnorr.mdx new file mode 100644 index 00000000000..ae12e6c12dc --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/cryptographic_primitives/schnorr.mdx @@ -0,0 +1,45 @@ +--- +title: Schnorr Signatures +description: Learn how you can verify Schnorr signatures using Noir +keywords: [cryptographic primitives, Noir project, schnorr, signatures] +sidebar_position: 2 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## schnorr::verify_signature + +Verifier for Schnorr signatures over the embedded curve (for BN254 it is Grumpkin). + +```rust title="schnorr_verify" showLineNumbers +pub fn verify_signature( + public_key_x: Field, + public_key_y: Field, + signature: [u8; 64], + message: [u8; N] +) -> bool +``` +> Source code: noir_stdlib/src/schnorr.nr#L2-L9 + + +where `_signature` can be generated like so using the npm package +[@noir-lang/barretenberg](https://www.npmjs.com/package/@noir-lang/barretenberg) + +```js +const { BarretenbergWasm } = require('@noir-lang/barretenberg/dest/wasm'); +const { Schnorr } = require('@noir-lang/barretenberg/dest/crypto/schnorr'); + +... + +const barretenberg = await BarretenbergWasm.new(); +const schnorr = new Schnorr(barretenberg); +const pubKey = schnorr.computePublicKey(privateKey); +const message = ... +const signature = Array.from( + schnorr.constructSignature(hash, privateKey).toBuffer() +); + +... +``` + + diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/logging.md b/docs/versioned_docs/version-v0.24.0/noir/standard_library/logging.md new file mode 100644 index 00000000000..db75ef9f86f --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/logging.md @@ -0,0 +1,78 @@ +--- +title: Logging +description: + Learn how to use the println statement for debugging in Noir with this tutorial. Understand the + basics of logging in Noir and how to implement it in your code. 
+keywords: + [ + noir logging, + println statement, + print statement, + debugging in noir, + noir std library, + logging tutorial, + basic logging in noir, + noir logging implementation, + noir debugging techniques, + rust, + ] +--- + +The standard library provides two familiar statements you can use: `println` and `print`. Despite being a limited implementation of rust's `println!` and `print!` macros, these constructs can be useful for debugging. + +You can print the output of both statements in your Noir code by using the `nargo execute` command or the `--show-output` flag when using `nargo test` (provided there are print statements in your tests). + +It is recommended to use `nargo execute` if you want to debug failing constraints with `println` or `print` statements. This is due to every input in a test being a constant rather than a witness, so we issue an error during compilation while we only print during execution (which comes after compilation). Neither `println`, nor `print` are callable for failed constraints caught at compile time. + +Both `print` and `println` are generic functions which can work on integers, fields, strings, and even structs or expressions. Note however, that slices are currently unsupported. For example: + +```rust +struct Person { + age: Field, + height: Field, +} + +fn main(age: Field, height: Field) { + let person = Person { + age: age, + height: height, + }; + println(person); + println(age + height); + println("Hello world!"); +} +``` + +You can print different types in the same statement (including strings) with a type called `fmtstr`. It can be specified in the same way as a normal string, just prepended with an "f" character: + +```rust + let fmt_str = f"i: {i}, j: {j}"; + println(fmt_str); + + let s = myStruct { y: x, x: y }; + println(s); + + println(f"i: {i}, s: {s}"); + + println(x); + println([x, y]); + + let foo = fooStruct { my_struct: s, foo: 15 }; + println(f"s: {s}, foo: {foo}"); + + println(15); // prints 0x0f, implicit Field + println(-1 as u8); // prints 255 + println(-1 as i8); // prints -1 +``` + +Examples shown above are interchangeable between the two `print` statements: + +```rust +let person = Person { age : age, height : height }; + +println(person); +print(person); + +println("Hello world!"); // Prints with a newline at the end of the input +print("Hello world!"); // Prints the input and keeps cursor on the same line +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/merkle_trees.md b/docs/versioned_docs/version-v0.24.0/noir/standard_library/merkle_trees.md new file mode 100644 index 00000000000..fa488677884 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/merkle_trees.md @@ -0,0 +1,58 @@ +--- +title: Merkle Trees +description: Learn about Merkle Trees in Noir with this tutorial. Explore the basics of computing a merkle root using a proof, with examples. +keywords: + [ + Merkle trees in Noir, + Noir programming language, + check membership, + computing root from leaf, + Noir Merkle tree implementation, + Merkle tree tutorial, + Merkle tree code examples, + Noir libraries, + pedersen hash., + ] +--- + +## compute_merkle_root + +Returns the root of the tree from the provided leaf and its hash path, using a [Pedersen hash](./cryptographic_primitives/hashes.mdx#pedersen_hash). 
+
+```rust
+fn compute_merkle_root(leaf : Field, index : Field, hash_path: [Field]) -> Field
+```
+
+example:
+
+```rust
+/**
+    // these values are for this example only
+    index = "0"
+    priv_key = "0x000000000000000000000000000000000000000000000000000000616c696365"
+    secret = "0x1929ea3ab8d9106a899386883d9428f8256cfedb3c4f6b66bf4aa4d28a79988f"
+    note_hash_path = [
+    "0x1e61bdae0f027b1b2159e1f9d3f8d00fa668a952dddd822fda80dc745d6f65cc",
+    "0x0e4223f3925f98934393c74975142bd73079ab0621f4ee133cee050a3c194f1a",
+    "0x2fd7bb412155bf8693a3bd2a3e7581a679c95c68a052f835dddca85fa1569a40"
+    ]
+    */
+fn main(index: Field, priv_key: Field, secret: Field, note_hash_path: [Field; 3]) {
+    let pubkey = std::scalar_mul::fixed_base_embedded_curve(priv_key);
+    let pubkey_x = pubkey[0];
+    let pubkey_y = pubkey[1];
+    let note_commitment = std::hash::pedersen([pubkey_x, pubkey_y, secret]);
+
+    let root = std::merkle::compute_merkle_root(note_commitment[0], index, note_hash_path);
+    println(root);
+}
+```
+
+To check merkle tree membership:
+
+1. Include a merkle root as a program input.
+2. Compute the merkle root of a given leaf, index and hash path.
+3. Assert the merkle roots are equal.
+
+For more info about merkle trees, see the Wikipedia [page](https://en.wikipedia.org/wiki/Merkle_tree).
diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/options.md b/docs/versioned_docs/version-v0.24.0/noir/standard_library/options.md
new file mode 100644
index 00000000000..a1bd4e1de5f
--- /dev/null
+++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/options.md
@@ -0,0 +1,101 @@
+---
+title: Option Type
+---
+
+The `Option` type is a way to express that a value might be present (`Some(T)`) or absent (`None`). It's a safer way to handle the potential absence of values, compared to using nulls in many other languages.
+
+```rust
+struct Option<T> {
+    None,
+    Some(T),
+}
+```
+
+The `Option` type, already imported into your Noir program, can be used directly:
+
+```rust
+fn main() {
+    let none = Option::none();
+    let some = Option::some(3);
+}
+```
+
+See [this test](https://github.com/noir-lang/noir/blob/5cbfb9c4a06c8865c98ff2b594464b037d821a5c/crates/nargo_cli/tests/test_data/option/src/main.nr) for a more comprehensive set of examples of each of the methods described below.
+
+## Methods
+
+### none
+
+Constructs a none value.
+
+### some
+
+Constructs a some wrapper around a given value.
+
+### is_none
+
+Returns true if the Option is None.
+
+### is_some
+
+Returns true if the Option is Some.
+
+### unwrap
+
+Asserts `self.is_some()` and returns the wrapped value.
+
+### unwrap_unchecked
+
+Returns the inner value without asserting `self.is_some()`. This method can be useful within an if condition when we already know that `option.is_some()`. If the option is None, there is no guarantee what value will be returned, only that it will be of type T for an `Option<T>`.
+
+### unwrap_or
+
+Returns the wrapped value if `self.is_some()`. Otherwise, returns the given default value.
+
+### unwrap_or_else
+
+Returns the wrapped value if `self.is_some()`. Otherwise, calls the given function to return a default value.
+
+### expect
+
+Asserts `self.is_some()` with a provided custom message and returns the contained `Some` value. The custom message is expected to be a format string.
+
+### map
+
+If self is `Some(x)`, this returns `Some(f(x))`. Otherwise, this returns `None`.
+
+### map_or
+
+If self is `Some(x)`, this returns `f(x)`. Otherwise, this returns the given default value.
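+
+For instance, `map`, `map_or` and `unwrap_or` might be combined as in this short illustrative sketch:
+
+```rust
+fn main(x: Field) {
+    let some_x = Option::some(x);
+    let none: Option<Field> = Option::none();
+
+    // map transforms the wrapped value, if there is one.
+    assert(some_x.map(|v| v + 1).unwrap() == x + 1);
+
+    // map_or falls back to the given default when the Option is None.
+    assert(none.map_or(0, |v| v * 2) == 0);
+
+    // unwrap_or provides a plain default value instead of a closure.
+    assert(none.unwrap_or(42) == 42);
+}
+```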
+ +### map_or_else + +If self is `Some(x)`, this returns `f(x)`. Otherwise, this returns `default()`. + +### and + +Returns None if self is None. Otherwise, this returns `other`. + +### and_then + +If self is None, this returns None. Otherwise, this calls the given function with the Some value contained within self, and returns the result of that call. In some languages this function is called `flat_map` or `bind`. + +### or + +If self is Some, return self. Otherwise, return `other`. + +### or_else + +If self is Some, return self. Otherwise, return `default()`. + +### xor + +If only one of the two Options is Some, return that option. Otherwise, if both options are Some or both are None, None is returned. + +### filter + +Returns `Some(x)` if self is `Some(x)` and `predicate(x)` is true. Otherwise, this returns `None`. + +### flatten + +Flattens an `Option>` into a `Option`. This returns `None` if the outer Option is None. Otherwise, this returns the inner Option. diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/recursion.md b/docs/versioned_docs/version-v0.24.0/noir/standard_library/recursion.md new file mode 100644 index 00000000000..9337499dac8 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/recursion.md @@ -0,0 +1,88 @@ +--- +title: Recursive Proofs +description: Learn about how to write recursive proofs in Noir. +keywords: [recursion, recursive proofs, verification_key, verify_proof] +--- + +Noir supports recursively verifying proofs, meaning you verify the proof of a Noir program in another Noir program. This enables creating proofs of arbitrary size by doing step-wise verification of smaller components of a large proof. + +Read [the explainer on recursion](../../explainers/explainer-recursion.md) to know more about this function and the [guide on how to use it.](../../how_to/how-to-recursion.md) + +## The `#[recursive]` Attribute + +In Noir, the `#[recursive]` attribute is used to indicate that a circuit is designed for recursive proof generation. When applied, it informs the compiler and the tooling that the circuit should be compiled in a way that makes its proofs suitable for recursive verification. This attribute eliminates the need for manual flagging of recursion at the tooling level, streamlining the proof generation process for recursive circuits. + +### Example usage with `#[recursive]` + +```rust +#[recursive] +fn main(x: Field, y: pub Field) { + assert(x == y, "x and y are not equal"); +} + +// This marks the circuit as recursion-friendly and indicates that proofs generated from this circuit +// are intended for recursive verification. +``` + +By incorporating this attribute directly in the circuit's definition, tooling like Nargo and NoirJS can automatically execute recursive-specific duties for Noir programs (e.g. recursive-friendly proof artifact generation) without additional flags or configurations. + +## Verifying Recursive Proofs + +```rust +#[foreign(verify_proof)] +fn verify_proof(_verification_key : [Field], _proof : [Field], _public_input : Field, _key_hash : Field) {} +``` + +:::info + +This is a black box function. Read [this section](./black_box_fns) to learn more about black box functions in Noir. 
+ +::: + +## Example usage + +```rust +use dep::std; + +fn main( + verification_key : [Field; 114], + proof : [Field; 93], + public_inputs : [Field; 1], + key_hash : Field, + proof_b : [Field; 93], +) { + std::verify_proof( + verification_key.as_slice(), + proof.as_slice(), + public_inputs.as_slice(), + key_hash + ); + + std::verify_proof( + verification_key.as_slice(), + proof_b.as_slice(), + public_inputs.as_slice(), + key_hash + ); +} +``` + +You can see a full example of recursive proofs in [this example recursion demo repo](https://github.com/noir-lang/noir-examples/tree/master/recursion). + +## Parameters + +### `verification_key` + +The verification key for the zk program that is being verified. + +### `proof` + +The proof for the zk program that is being verified. + +### `public_inputs` + +These represent the public inputs of the proof we are verifying. + +### `key_hash` + +A key hash is used to check the validity of the verification key. The circuit implementing this opcode can use this hash to ensure that the key provided to the circuit matches the key produced by the circuit creator. diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/traits.md b/docs/versioned_docs/version-v0.24.0/noir/standard_library/traits.md new file mode 100644 index 00000000000..ba9fa2ee841 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/traits.md @@ -0,0 +1,399 @@ +--- +title: Traits +description: Noir's stdlib provides a few commonly used traits. +keywords: [traits, trait, interface, protocol, default, add, eq] +--- + +## `std::default` + +### `std::default::Default` + +```rust title="default-trait" showLineNumbers +trait Default { + fn default() -> Self; +} +``` +> Source code: noir_stdlib/src/default.nr#L1-L5 + + +Constructs a default value of a type. + +Implementations: +```rust +impl Default for Field { .. } + +impl Default for i8 { .. } +impl Default for i16 { .. } +impl Default for i32 { .. } +impl Default for i64 { .. } + +impl Default for u8 { .. } +impl Default for u16 { .. } +impl Default for u32 { .. } +impl Default for u64 { .. } + +impl Default for () { .. } +impl Default for bool { .. } + +impl Default for [T; N] + where T: Default { .. } + +impl Default for (A, B) + where A: Default, B: Default { .. } + +impl Default for (A, B, C) + where A: Default, B: Default, C: Default { .. } + +impl Default for (A, B, C, D) + where A: Default, B: Default, C: Default, D: Default { .. } + +impl Default for (A, B, C, D, E) + where A: Default, B: Default, C: Default, D: Default, E: Default { .. } +``` + +For primitive integer types, the return value of `default` is `0`. Container +types such as arrays are filled with default values of their element type. + + +## `std::convert` + +### `std::convert::From` + +```rust title="from-trait" showLineNumbers +trait From { + fn from(input: T) -> Self; +} +``` +> Source code: noir_stdlib/src/convert.nr#L1-L5 + + +The `From` trait defines how to convert from a given type `T` to the type on which the trait is implemented. + +The Noir standard library provides a number of implementations of `From` between primitive types. 
+```rust title="from-impls" showLineNumbers +// Unsigned integers + +impl From for u32 { fn from(value: u8) -> u32 { value as u32 } } + +impl From for u64 { fn from(value: u8) -> u64 { value as u64 } } +impl From for u64 { fn from(value: u32) -> u64 { value as u64 } } + +impl From for Field { fn from(value: u8) -> Field { value as Field } } +impl From for Field { fn from(value: u32) -> Field { value as Field } } +impl From for Field { fn from(value: u64) -> Field { value as Field } } + +// Signed integers + +impl From for i32 { fn from(value: i8) -> i32 { value as i32 } } + +impl From for i64 { fn from(value: i8) -> i64 { value as i64 } } +impl From for i64 { fn from(value: i32) -> i64 { value as i64 } } + +// Booleans +impl From for u8 { fn from(value: bool) -> u8 { value as u8 } } +impl From for u32 { fn from(value: bool) -> u32 { value as u32 } } +impl From for u64 { fn from(value: bool) -> u64 { value as u64 } } +impl From for i8 { fn from(value: bool) -> i8 { value as i8 } } +impl From for i32 { fn from(value: bool) -> i32 { value as i32 } } +impl From for i64 { fn from(value: bool) -> i64 { value as i64 } } +impl From for Field { fn from(value: bool) -> Field { value as Field } } +``` +> Source code: noir_stdlib/src/convert.nr#L25-L52 + + +#### When to implement `From` + +As a general rule of thumb, `From` may be implemented in the [situations where it would be suitable in Rust](https://doc.rust-lang.org/std/convert/trait.From.html#when-to-implement-from): + +- The conversion is *infallible*: Noir does not provide an equivalent to Rust's `TryFrom`, if the conversion can fail then provide a named method instead. +- The conversion is *lossless*: semantically, it should not lose or discard information. For example, `u32: From` can losslessly convert any `u16` into a valid `u32` such that the original `u16` can be recovered. On the other hand, `u16: From` should not be implemented as `2**16` is a `u32` which cannot be losslessly converted into a `u16`. +- The conversion is *value-preserving*: the conceptual kind and meaning of the resulting value is the same, even though the Noir type and technical representation might be different. While it's possible to infallibly and losslessly convert a `u8` into a `str<2>` hex representation, `4u8` and `"04"` are too different for `str<2>: From` to be implemented. +- The conversion is *obvious*: it's the only reasonable conversion between the two types. If there's ambiguity on how to convert between them such that the same input could potentially map to two different values then a named method should be used. For instance rather than implementing `U128: From<[u8; 16]>`, the methods `U128::from_le_bytes` and `U128::from_be_bytes` are used as otherwise the endianness of the array would be ambiguous, resulting in two potential values of `U128` from the same byte array. + +One additional recommendation specific to Noir is: +- The conversion is *efficient*: it's relatively cheap to convert between the two types. Due to being a ZK DSL, it's more important to avoid unnecessary computation compared to Rust. If the implementation of `From` would encourage users to perform unnecessary conversion, resulting in additional proving time, then it may be preferable to expose functionality such that this conversion may be avoided. + +### `std::convert::Into` + +The `Into` trait is defined as the reciprocal of `From`. It should be easy to convince yourself that if we can convert to type `A` from type `B`, then it's possible to convert type `B` into type `A`. 
+ +For this reason, implementing `From` on a type will automatically generate a matching `Into` implementation. One should always prefer implementing `From` over `Into` as implementing `Into` will not generate a matching `From` implementation. + +```rust title="into-trait" showLineNumbers +trait Into { + fn into(input: Self) -> T; +} + +impl Into for U where T: From { + fn into(input: U) -> T { + T::from(input) + } +} +``` +> Source code: noir_stdlib/src/convert.nr#L13-L23 + + +`Into` is most useful when passing function arguments where the types don't quite match up with what the function expects. In this case, the compiler has enough type information to perform the necessary conversion by just appending `.into()` onto the arguments in question. + + +## `std::cmp` + +### `std::cmp::Eq` + +```rust title="eq-trait" showLineNumbers +trait Eq { + fn eq(self, other: Self) -> bool; +} +``` +> Source code: noir_stdlib/src/cmp.nr#L1-L5 + + +Returns `true` if `self` is equal to `other`. Implementing this trait on a type +allows the type to be used with `==` and `!=`. + +Implementations: +```rust +impl Eq for Field { .. } + +impl Eq for i8 { .. } +impl Eq for i16 { .. } +impl Eq for i32 { .. } +impl Eq for i64 { .. } + +impl Eq for u8 { .. } +impl Eq for u16 { .. } +impl Eq for u32 { .. } +impl Eq for u64 { .. } + +impl Eq for () { .. } +impl Eq for bool { .. } + +impl Eq for [T; N] + where T: Eq { .. } + +impl Eq for (A, B) + where A: Eq, B: Eq { .. } + +impl Eq for (A, B, C) + where A: Eq, B: Eq, C: Eq { .. } + +impl Eq for (A, B, C, D) + where A: Eq, B: Eq, C: Eq, D: Eq { .. } + +impl Eq for (A, B, C, D, E) + where A: Eq, B: Eq, C: Eq, D: Eq, E: Eq { .. } +``` + +### `std::cmp::Ord` + +```rust title="ord-trait" showLineNumbers +trait Ord { + fn cmp(self, other: Self) -> Ordering; +} +``` +> Source code: noir_stdlib/src/cmp.nr#L92-L96 + + +`a.cmp(b)` compares two values returning `Ordering::less()` if `a < b`, +`Ordering::equal()` if `a == b`, or `Ordering::greater()` if `a > b`. +Implementing this trait on a type allows `<`, `<=`, `>`, and `>=` to be +used on values of the type. + +Implementations: + +```rust +impl Ord for u8 { .. } +impl Ord for u16 { .. } +impl Ord for u32 { .. } +impl Ord for u64 { .. } + +impl Ord for i8 { .. } +impl Ord for i16 { .. } +impl Ord for i32 { .. } + +impl Ord for i64 { .. } + +impl Ord for () { .. } +impl Ord for bool { .. } + +impl Ord for [T; N] + where T: Ord { .. } + +impl Ord for (A, B) + where A: Ord, B: Ord { .. } + +impl Ord for (A, B, C) + where A: Ord, B: Ord, C: Ord { .. } + +impl Ord for (A, B, C, D) + where A: Ord, B: Ord, C: Ord, D: Ord { .. } + +impl Ord for (A, B, C, D, E) + where A: Ord, B: Ord, C: Ord, D: Ord, E: Ord { .. } +``` + +## `std::ops` + +### `std::ops::Add`, `std::ops::Sub`, `std::ops::Mul`, and `std::ops::Div` + +These traits abstract over addition, subtraction, multiplication, and division respectively. +Implementing these traits for a given type will also allow that type to be used with the corresponding operator +for that trait (`+` for Add, etc) in addition to the normal method names. 
+ +```rust title="add-trait" showLineNumbers +trait Add { + fn add(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L1-L5 + +```rust title="sub-trait" showLineNumbers +trait Sub { + fn sub(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L17-L21 + +```rust title="mul-trait" showLineNumbers +trait Mul { + fn mul(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L33-L37 + +```rust title="div-trait" showLineNumbers +trait Div { + fn div(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L49-L53 + + +The implementations block below is given for the `Add` trait, but the same types that implement +`Add` also implement `Sub`, `Mul`, and `Div`. + +Implementations: +```rust +impl Add for Field { .. } + +impl Add for i8 { .. } +impl Add for i16 { .. } +impl Add for i32 { .. } +impl Add for i64 { .. } + +impl Add for u8 { .. } +impl Add for u16 { .. } +impl Add for u32 { .. } +impl Add for u64 { .. } +``` + +### `std::ops::Rem` + +```rust title="rem-trait" showLineNumbers +trait Rem{ + fn rem(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L65-L69 + + +`Rem::rem(a, b)` is the remainder function returning the result of what is +left after dividing `a` and `b`. Implementing `Rem` allows the `%` operator +to be used with the implementation type. + +Unlike other numeric traits, `Rem` is not implemented for `Field`. + +Implementations: +```rust +impl Rem for u8 { fn rem(self, other: u8) -> u8 { self % other } } +impl Rem for u16 { fn rem(self, other: u16) -> u16 { self % other } } +impl Rem for u32 { fn rem(self, other: u32) -> u32 { self % other } } +impl Rem for u64 { fn rem(self, other: u64) -> u64 { self % other } } + +impl Rem for i8 { fn rem(self, other: i8) -> i8 { self % other } } +impl Rem for i16 { fn rem(self, other: i16) -> i16 { self % other } } +impl Rem for i32 { fn rem(self, other: i32) -> i32 { self % other } } +impl Rem for i64 { fn rem(self, other: i64) -> i64 { self % other } } +``` + +### `std::ops::{ BitOr, BitAnd, BitXor }` + +```rust title="bitor-trait" showLineNumbers +trait BitOr { + fn bitor(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L79-L83 + +```rust title="bitand-trait" showLineNumbers +trait BitAnd { + fn bitand(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L95-L99 + +```rust title="bitxor-trait" showLineNumbers +trait BitXor { + fn bitxor(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L111-L115 + + +Traits for the bitwise operations `|`, `&`, and `^`. + +Implementing `BitOr`, `BitAnd` or `BitXor` for a type allows the `|`, `&`, or `^` operator respectively +to be used with the type. + +The implementations block below is given for the `BitOr` trait, but the same types that implement +`BitOr` also implement `BitAnd` and `BitXor`. 
+ +Implementations: +```rust +impl BitOr for bool { fn bitor(self, other: bool) -> bool { self | other } } + +impl BitOr for u8 { fn bitor(self, other: u8) -> u8 { self | other } } +impl BitOr for u16 { fn bitor(self, other: u16) -> u16 { self | other } } +impl BitOr for u32 { fn bitor(self, other: u32) -> u32 { self | other } } +impl BitOr for u64 { fn bitor(self, other: u64) -> u64 { self | other } } + +impl BitOr for i8 { fn bitor(self, other: i8) -> i8 { self | other } } +impl BitOr for i16 { fn bitor(self, other: i16) -> i16 { self | other } } +impl BitOr for i32 { fn bitor(self, other: i32) -> i32 { self | other } } +impl BitOr for i64 { fn bitor(self, other: i64) -> i64 { self | other } } +``` + +### `std::ops::{ Shl, Shr }` + +```rust title="shl-trait" showLineNumbers +trait Shl { + fn shl(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L127-L131 + +```rust title="shr-trait" showLineNumbers +trait Shr { + fn shr(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L142-L146 + + +Traits for a bit shift left and bit shift right. + +Implementing `Shl` for a type allows the left shift operator (`<<`) to be used with the implementation type. +Similarly, implementing `Shr` allows the right shift operator (`>>`) to be used with the type. + +Note that bit shifting is not currently implemented for signed types. + +The implementations block below is given for the `Shl` trait, but the same types that implement +`Shl` also implement `Shr`. + +Implementations: +```rust +impl Shl for u8 { fn shl(self, other: u8) -> u8 { self << other } } +impl Shl for u16 { fn shl(self, other: u16) -> u16 { self << other } } +impl Shl for u32 { fn shl(self, other: u32) -> u32 { self << other } } +impl Shl for u64 { fn shl(self, other: u64) -> u64 { self << other } } +``` diff --git a/docs/versioned_docs/version-v0.24.0/noir/standard_library/zeroed.md b/docs/versioned_docs/version-v0.24.0/noir/standard_library/zeroed.md new file mode 100644 index 00000000000..97dab02dac2 --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/noir/standard_library/zeroed.md @@ -0,0 +1,25 @@ +--- +title: Zeroed Function +description: + The zeroed function returns a zeroed value of any type. +keywords: + [ + zeroed + ] +--- + +Implements `fn zeroed() -> T` to return a zeroed value of any type. This function is generally unsafe to use as the zeroed bit pattern is not guaranteed to be valid for all types. It can however, be useful in cases when the value is guaranteed not to be used such as in a BoundedVec library implementing a growable vector, up to a certain length, backed by an array. The array can be initialized with zeroed values which are guaranteed to be inaccessible until the vector is pushed to. Similarly, enumerations in noir can be implemented using this method by providing zeroed values for the unused variants. + +You can access the function at `std::unsafe::zeroed`. + +This function currently supports the following types: + +- Field +- Bool +- Uint +- Array +- String +- Tuple +- Function + +Using it on other types could result in unexpected behavior. 
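+
+As an informal sketch of the pattern described above, here is a toy growable-up-to-N vector backed by a zeroed array. The `Buffer` type, its fields and `push` method are hypothetical stand-ins, not the stdlib's own definitions:
+
+```rust
+struct Buffer<T, N> {
+    storage: [T; N],
+    len: u64,
+}
+
+impl<T, N> Buffer<T, N> {
+    fn new() -> Self {
+        // Slots beyond `len` are never read before being pushed to,
+        // so zeroed placeholders are safe here.
+        let zero = std::unsafe::zeroed();
+        Buffer { storage: [zero; N], len: 0 }
+    }
+
+    fn push(&mut self, value: T) {
+        assert(self.len < N as u64);
+        self.storage[self.len] = value;
+        self.len += 1;
+    }
+}
+
+fn main(x: Field, y: Field) {
+    let mut buf: Buffer<Field, 4> = Buffer::new();
+    buf.push(x);
+    buf.push(y);
+    assert(buf.len == 2);
+}
+```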
diff --git a/docs/versioned_docs/version-v0.24.0/reference/_category_.json b/docs/versioned_docs/version-v0.24.0/reference/_category_.json new file mode 100644 index 00000000000..5b6a20a609a --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/reference/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 4, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.24.0/reference/nargo_commands.md b/docs/versioned_docs/version-v0.24.0/reference/nargo_commands.md new file mode 100644 index 00000000000..8a309ef4e7e --- /dev/null +++ b/docs/versioned_docs/version-v0.24.0/reference/nargo_commands.md @@ -0,0 +1,380 @@ +--- +title: Nargo +description: + Noir CLI Commands for Noir Prover and Verifier to create, execute, prove and verify programs, + generate Solidity verifier smart contract and compile into JSON file containing ACIR + representation and ABI of circuit. +keywords: + [ + Nargo, + Noir CLI, + Noir Prover, + Noir Verifier, + generate Solidity verifier, + compile JSON file, + ACIR representation, + ABI of circuit, + TypeScript, + ] +sidebar_position: 0 +--- + +# Command-Line Help for `nargo` + +This document contains the help content for the `nargo` command-line program. + +**Command Overview:** + +* [`nargo`↴](#nargo) +* [`nargo backend`↴](#nargo-backend) +* [`nargo backend current`↴](#nargo-backend-current) +* [`nargo backend ls`↴](#nargo-backend-ls) +* [`nargo backend use`↴](#nargo-backend-use) +* [`nargo backend install`↴](#nargo-backend-install) +* [`nargo backend uninstall`↴](#nargo-backend-uninstall) +* [`nargo check`↴](#nargo-check) +* [`nargo fmt`↴](#nargo-fmt) +* [`nargo codegen-verifier`↴](#nargo-codegen-verifier) +* [`nargo compile`↴](#nargo-compile) +* [`nargo new`↴](#nargo-new) +* [`nargo init`↴](#nargo-init) +* [`nargo execute`↴](#nargo-execute) +* [`nargo prove`↴](#nargo-prove) +* [`nargo verify`↴](#nargo-verify) +* [`nargo test`↴](#nargo-test) +* [`nargo info`↴](#nargo-info) +* [`nargo lsp`↴](#nargo-lsp) + +## `nargo` + +Noir's package manager + +**Usage:** `nargo ` + +###### **Subcommands:** + +* `backend` — Install and select custom backends used to generate and verify proofs +* `check` — Checks the constraint system for errors +* `fmt` — Format the Noir files in a workspace +* `codegen-verifier` — Generates a Solidity verifier smart contract for the program +* `compile` — Compile the program and its secret execution trace into ACIR format +* `new` — Create a Noir project in a new directory +* `init` — Create a Noir project in the current directory +* `execute` — Executes a circuit to calculate its return value +* `prove` — Create proof for this program. 
The proof is returned as a hex encoded string +* `verify` — Given a proof and a program, verify whether the proof is valid +* `test` — Run the tests for this program +* `info` — Provides detailed information on a circuit +* `lsp` — Starts the Noir LSP server + +###### **Options:** + + + + +## `nargo backend` + +Install and select custom backends used to generate and verify proofs + +**Usage:** `nargo backend ` + +###### **Subcommands:** + +* `current` — Prints the name of the currently active backend +* `ls` — Prints the list of currently installed backends +* `use` — Select the backend to use +* `install` — Install a new backend from a URL +* `uninstall` — Uninstalls a backend + + + +## `nargo backend current` + +Prints the name of the currently active backend + +**Usage:** `nargo backend current` + + + +## `nargo backend ls` + +Prints the list of currently installed backends + +**Usage:** `nargo backend ls` + + + +## `nargo backend use` + +Select the backend to use + +**Usage:** `nargo backend use ` + +###### **Arguments:** + +* `` + + + +## `nargo backend install` + +Install a new backend from a URL + +**Usage:** `nargo backend install ` + +###### **Arguments:** + +* `` — The name of the backend to install +* `` — The URL from which to download the backend + + + +## `nargo backend uninstall` + +Uninstalls a backend + +**Usage:** `nargo backend uninstall ` + +###### **Arguments:** + +* `` — The name of the backend to uninstall + + + +## `nargo check` + +Checks the constraint system for errors + +**Usage:** `nargo check [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to check +* `--workspace` — Check all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo fmt` + +Format the Noir files in a workspace + +**Usage:** `nargo fmt [OPTIONS]` + +###### **Options:** + +* `--check` — Run noirfmt in check mode + + + +## `nargo codegen-verifier` + +Generates a Solidity verifier smart contract for the program + +**Usage:** `nargo codegen-verifier [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to codegen +* `--workspace` — Codegen all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo compile` + +Compile the program and its secret execution trace into ACIR format + +**Usage:** `nargo compile [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to compile +* `--workspace` — Compile all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo new` + +Create a Noir project in a new directory + +**Usage:** `nargo new [OPTIONS] ` + +###### **Arguments:** + +* `` — The path to save the new project + +###### **Options:** + +* `--name ` — Name of the package [default: package directory name] +* `--lib` — Use a library template +* `--bin` 
— Use a binary template [default] +* `--contract` — Use a contract template + + + +## `nargo init` + +Create a Noir project in the current directory + +**Usage:** `nargo init [OPTIONS]` + +###### **Options:** + +* `--name ` — Name of the package [default: current directory name] +* `--lib` — Use a library template +* `--bin` — Use a binary template [default] +* `--contract` — Use a contract template + + + +## `nargo execute` + +Executes a circuit to calculate its return value + +**Usage:** `nargo execute [OPTIONS] [WITNESS_NAME]` + +###### **Arguments:** + +* `` — Write the execution witness to named file + +###### **Options:** + +* `-p`, `--prover-name ` — The name of the toml file which contains the inputs for the prover + + Default value: `Prover` +* `--package ` — The name of the package to execute +* `--workspace` — Execute all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings +* `--oracle-resolver ` — JSON RPC url to solve oracle calls + + + +## `nargo prove` + +Create proof for this program. The proof is returned as a hex encoded string + +**Usage:** `nargo prove [OPTIONS]` + +###### **Options:** + +* `-p`, `--prover-name ` — The name of the toml file which contains the inputs for the prover + + Default value: `Prover` +* `-v`, `--verifier-name ` — The name of the toml file which contains the inputs for the verifier + + Default value: `Verifier` +* `--verify` — Verify proof after proving +* `--package ` — The name of the package to prove +* `--workspace` — Prove all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings +* `--oracle-resolver ` — JSON RPC url to solve oracle calls + + + +## `nargo verify` + +Given a proof and a program, verify whether the proof is valid + +**Usage:** `nargo verify [OPTIONS]` + +###### **Options:** + +* `-v`, `--verifier-name ` — The name of the toml file which contains the inputs for the verifier + + Default value: `Verifier` +* `--package ` — The name of the package verify +* `--workspace` — Verify all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo test` + +Run the tests for this program + +**Usage:** `nargo test [OPTIONS] [TEST_NAME]` + +###### **Arguments:** + +* `` — If given, only tests with names containing this string will be run + +###### **Options:** + +* `--show-output` — Display output of `println` statements +* `--exact` — Only run tests that match exactly +* `--package ` — The name of the package to test +* `--workspace` — Test all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings +* `--oracle-resolver ` — JSON RPC url to 
solve oracle calls + + + +## `nargo info` + +Provides detailed information on a circuit + +Current information provided: 1. The number of ACIR opcodes 2. Counts the final number gates in the circuit used by a backend + +**Usage:** `nargo info [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to detail +* `--workspace` — Detail all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo lsp` + +Starts the Noir LSP server + +Starts an LSP server which allows IDEs such as VS Code to display diagnostics in Noir source. + +VS Code Noir Language Support: https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir + +**Usage:** `nargo lsp` + + + +
+
+
+This document was generated automatically by
+[`clap-markdown`](https://crates.io/crates/clap-markdown).
+
diff --git a/docs/versioned_docs/version-v0.24.0/tutorials/noirjs_app.md b/docs/versioned_docs/version-v0.24.0/tutorials/noirjs_app.md
new file mode 100644
index 00000000000..ad76dd255cc
--- /dev/null
+++ b/docs/versioned_docs/version-v0.24.0/tutorials/noirjs_app.md
@@ -0,0 +1,279 @@
+---
+title: Building a web app with NoirJS
+description: Learn how to set up a new app that uses Noir to generate and verify zero-knowledge SNARK proofs in a TypeScript or JavaScript environment.
+keywords: [how to, guide, javascript, typescript, noir, barretenberg, zero-knowledge, proofs, app]
+sidebar_position: 0
+pagination_next: noir/concepts/data_types/index
+---
+
+NoirJS is a set of packages meant to work both in a browser and a server environment. In this tutorial, we will build a simple web app using them. From here, you should get an idea of how to proceed with your own Noir projects!
+
+You can find the complete app code for this guide [here](https://github.com/noir-lang/tiny-noirjs-app).
+
+## Setup
+
+:::note
+
+Feel free to use whatever versions you prefer; just keep in mind that Nargo and the NoirJS packages are meant to be in sync. For example, Nargo 0.19.x matches `noir_js@0.19.x`, etc.
+
+In this guide, we will be pinned to 0.19.4.
+
+:::
+
+Before we start, we want to make sure we have Node and Nargo installed.
+
+We start by opening a terminal and executing `node --version`. If we don't get an output like `v20.10.0`, that means Node is not installed. Let's do that by following the handy [nvm guide](https://github.com/nvm-sh/nvm?tab=readme-ov-file#install--update-script).
+
+As for `Nargo`, we can follow the [Nargo guide](../getting_started/installation/index.md) to install it. If you're lazy, just paste this into a terminal and run `noirup`:
+
+```sh
+curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash
+```
+
+Easy enough. Onwards!
+
+## Our project
+
+ZK is a powerful technology. An app that doesn't reveal one of the inputs to *anyone* is almost unbelievable, yet Noir makes it as easy as a single line of code.
+
+In fact, it's so simple that it comes nicely packaged in `nargo`. Let's do that!
+
+### Nargo
+
+Run:
+
+```nargo new circuit```
+
+And... That's about it. Your program is ready to be compiled and run.
+
+To compile, let's `cd` into the `circuit` folder to enter our project, and call:
+
+```nargo compile```
+
+This compiles our circuit into `json` format and adds it to a new `target` folder.
+
+:::info
+
+At this point in the tutorial, your folder structure should look like this:
+
+```tree
+.
+└── circuit <---- our working directory
+    ├── Nargo.toml
+    ├── src
+    │   └── main.nr
+    └── target
+        └── circuit.json
+```
+
+:::
+
+### Node and Vite
+
+If you want to explore Nargo, feel free to go on a side-quest now and follow the steps in the
+[getting started](../getting_started/hello_noir/index.md) guide. However, we want our app to run in the browser, so we need Vite.
+
+Vite is a powerful tool to generate static websites. While it provides all kinds of features, let's just go barebones with some good old vanilla JS.
+
+To do this, go back to the previous folder (`cd ..`) and create a new vite project by running `npm create vite` and choosing "Vanilla" and "JavaScript".
+
+You should see `vite-project` appear in your root folder.
This seems like a good time to `cd` into it and install our NoirJS packages: + +```bash +npm i @noir-lang/backend_barretenberg@0.19.4 @noir-lang/noir_js@0.19.4 +``` + +:::info + +At this point in the tutorial, your folder structure should look like this: + +```tree +. +└── circuit + └── ...etc... +└── vite-project <---- our working directory + └── ...etc... +``` + +::: + +#### Some cleanup + +`npx create vite` is amazing but it creates a bunch of files we don't really need for our simple example. Actually, let's just delete everything except for `index.html`, `main.js` and `package.json`. I feel lighter already. + +![my heart is ready for you, noir.js](@site/static/img/memes/titanic.jpeg) + +## HTML + +Our app won't run like this, of course. We need some working HTML, at least. Let's open our broken-hearted `index.html` and replace everything with this code snippet: + +```html + + + + + + +

+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <title>Noir app</title>
+  </head>
+  <body>
+    <script type="module" src="/main.js"></script>
+    <h1>Noir app</h1>
+    <input id="guessInput" type="number" />
+    <button id="submitGuess">Submit Guess</button>
+    <div id="logs"><h2>Logs</h2></div>
+    <div id="results"><h2>Proof</h2></div>
+  </body>
+</html>
+ + +``` + +It *could* be a beautiful UI... Depending on which universe you live in. + +## Some good old vanilla Javascript + +Our love for Noir needs undivided attention, so let's just open `main.js` and delete everything (this is where the romantic scenery becomes a bit creepy). + +Start by pasting in this boilerplate code: + +```js +const setup = async () => { + await Promise.all([ + import("@noir-lang/noirc_abi").then(module => + module.default(new URL("@noir-lang/noirc_abi/web/noirc_abi_wasm_bg.wasm", import.meta.url).toString()) + ), + import("@noir-lang/acvm_js").then(module => + module.default(new URL("@noir-lang/acvm_js/web/acvm_js_bg.wasm", import.meta.url).toString()) + ) + ]); +} + +function display(container, msg) { + const c = document.getElementById(container); + const p = document.createElement('p'); + p.textContent = msg; + c.appendChild(p); +} + +document.getElementById('submitGuess').addEventListener('click', async () => { + try { + // here's where love happens + } catch(err) { + display("logs", "Oh 💔 Wrong guess") + } +}); + +``` + +The display function doesn't do much. We're simply manipulating our website to see stuff happening. For example, if the proof fails, it will simply log a broken heart 😢 + +As for the `setup` function, it's just a sad reminder that dealing with `wasm` on the browser is not as easy as it should. Just copy, paste, and forget. + +:::info + +At this point in the tutorial, your folder structure should look like this: + +```tree +. +└── circuit + └── ...same as above +└── vite-project + ├── main.js + ├── package.json + └── index.html +``` + +You'll see other files and folders showing up (like `package-lock.json`, `node_modules`) but you shouldn't have to care about those. + +::: + +## Some NoirJS + +We're starting with the good stuff now. If you've compiled the circuit as described above, you should have a `json` file we want to import at the very top of our `main.js` file: + +```ts +import circuit from '../circuit/target/circuit.json'; +``` + +[Noir is backend-agnostic](../index.mdx#whats-new-about-noir). We write Noir, but we also need a proving backend. That's why we need to import and instantiate the two dependencies we installed above: `BarretenbergBackend` and `Noir`. Let's import them right below: + +```js +import { BarretenbergBackend } from '@noir-lang/backend_barretenberg'; +import { Noir } from '@noir-lang/noir_js'; +``` + +And instantiate them inside our try-catch block: + +```ts +// try { +const backend = new BarretenbergBackend(circuit); +const noir = new Noir(circuit, backend); +// } +``` + +:::note + +For the remainder of the tutorial, everything will be happening inside the `try` block + +::: + +## Our app + +Now for the app itself. We're capturing whatever is in the input when people press the submit button. Just add this: + +```js +const x = parseInt(document.getElementById('guessInput').value); +const input = { x, y: 2 }; +``` + +Now we're ready to prove stuff! Let's feed some inputs to our circuit and calculate the proof: + +```js +await setup(); // let's squeeze our wasm inits here + +display('logs', 'Generating proof... ⌛'); +const proof = await noir.generateFinalProof(input); +display('logs', 'Generating proof... ✅'); +display('results', proof.proof); +``` + +You're probably eager to see stuff happening, so go and run your app now! + +From your terminal, run `npm run dev`. If it doesn't open a browser for you, just visit `localhost:5173`. You should now see the worst UI ever, with an ugly input. 
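As a quick reminder of what we are about to prove: assuming you kept the default program that `nargo new` generated, `circuit/src/main.nr` is just a single assertion. A sketch of that template (adjust if you changed it):

```rust
// circuit/src/main.nr — the default `nargo new` template (sketch).
// `x` stays private to the prover; `y` is a public input.
fn main(x: Field, y: pub Field) {
    assert(x != y);
}
```
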
+ +![Getting Started 0](@site/static/img/noir_getting_started_1.png) + +Now, our circuit says `fn main(x: Field, y: pub Field)`. This means only the `y` value is public, and it's hardcoded above: `input = { x, y: 2 }`. In other words, you won't need to send your secret`x` to the verifier! + +By inputting any number other than 2 in the input box and clicking "submit", you should get a valid proof. Otherwise the proof won't even generate correctly. By the way, if you're human, you shouldn't be able to understand anything on the "proof" box. That's OK. We like you, human ❤️. + +## Verifying + +Time to celebrate, yes! But we shouldn't trust machines so blindly. Let's add these lines to see our proof being verified: + +```js +display('logs', 'Verifying proof... ⌛'); +const verification = await noir.verifyFinalProof(proof); +if (verification) display('logs', 'Verifying proof... ✅'); +``` + +You have successfully generated a client-side Noir web app! + +![coded app without math knowledge](@site/static/img/memes/flextape.jpeg) + +## Further Reading + +You can see how noirjs is used in a full stack Next.js hardhat application in the [noir-starter repo here](https://github.com/noir-lang/noir-starter/tree/main/vite-hardhat). The example shows how to calculate a proof in the browser and verify it with a deployed Solidity verifier contract from noirjs. + +You should also check out the more advanced examples in the [noir-examples repo](https://github.com/noir-lang/noir-examples), where you'll find reference usage for some cool apps. diff --git a/docs/versioned_sidebars/version-v0.24.0-sidebars.json b/docs/versioned_sidebars/version-v0.24.0-sidebars.json new file mode 100644 index 00000000000..b16f79cc176 --- /dev/null +++ b/docs/versioned_sidebars/version-v0.24.0-sidebars.json @@ -0,0 +1,83 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "index" + }, + { + "type": "category", + "label": "Getting Started", + "items": [ + { + "type": "autogenerated", + "dirName": "getting_started" + } + ] + }, + { + "type": "category", + "label": "The Noir Language", + "items": [ + { + "type": "autogenerated", + "dirName": "noir" + } + ] + }, + { + "type": "html", + "value": "
", + "defaultStyle": true + }, + { + "type": "category", + "label": "How To Guides", + "items": [ + { + "type": "autogenerated", + "dirName": "how_to" + } + ] + }, + { + "type": "category", + "label": "Explainers", + "items": [ + { + "type": "autogenerated", + "dirName": "explainers" + } + ] + }, + { + "type": "category", + "label": "Tutorials", + "items": [ + { + "type": "autogenerated", + "dirName": "tutorials" + } + ] + }, + { + "type": "category", + "label": "Reference", + "items": [ + { + "type": "autogenerated", + "dirName": "reference" + } + ] + }, + { + "type": "html", + "value": "
", + "defaultStyle": true + }, + { + "type": "doc", + "id": "migration_notes", + "label": "Migration notes" + } + ] +} diff --git a/flake.nix b/flake.nix index 6849dc0a0ad..5125dad06be 100644 --- a/flake.nix +++ b/flake.nix @@ -44,7 +44,7 @@ rustToolchain = fenix.packages.${system}.fromToolchainFile { file = ./rust-toolchain.toml; - sha256 = "sha256-R0F0Risbr74xg9mEYydyebx/z0Wu6HI0/KWwrV30vZo="; + sha256 = "sha256-rLP8+fTxnPHoR96ZJiCa/5Ans1OojI7MLsmSqR2ip8o="; }; craneLib = (crane.mkLib pkgs).overrideToolchain rustToolchain; @@ -73,7 +73,7 @@ # Configuration shared between builds config = { # x-release-please-start-version - version = "0.23.0"; + version = "0.24.0"; # x-release-please-end src = pkgs.lib.cleanSourceWith { @@ -81,7 +81,7 @@ # Custom filter with various file extensions that we rely upon to build packages # Currently: `.nr`, `.sol`, `.sh`, `.json`, `.md` and `.wasm` filter = path: type: - (builtins.match ".*\.(nr|sol|sh|json|md|wasm)$" path != null) || (craneLib.filterCargoSources path type); + (builtins.match ".*\.(nr|sol|sh|json|md|wasm|txt)$" path != null) || (craneLib.filterCargoSources path type); }; # TODO(#1198): It'd be nice to include these flags when running `cargo clippy` in a devShell. diff --git a/noir_stdlib/src/array.nr b/noir_stdlib/src/array.nr index bcdf56dd7aa..3da4b649174 100644 --- a/noir_stdlib/src/array.nr +++ b/noir_stdlib/src/array.nr @@ -1,25 +1,55 @@ +use crate::cmp::{Ord}; // TODO: Once we fully move to the new SSA pass this module can be removed and replaced // by the methods in the `slice` module impl [T; N] { #[builtin(array_len)] - pub fn len(_self: Self) -> Field {} + pub fn len(self) -> u64 {} - #[builtin(arraysort)] - pub fn sort(_self: Self) -> Self {} + pub fn sort(self) -> Self where T: Ord { + self.sort_via(|a: T, b: T| a <= b) + } + + pub fn sort_via(self, ordering: fn[Env](T, T) -> bool) -> Self { + let sorted_index = self.get_sorting_index(ordering); + let mut result = self; + // Ensure the indexes are correct + for i in 0..N { + let pos = find_index(sorted_index, i); + assert(sorted_index[pos] == i); + } + // Sort the array using the indexes + for i in 0..N { + result[i] = self[sorted_index[i]]; + } + // Ensure the array is sorted + for i in 0..N - 1 { + assert(ordering(result[i], result[i + 1])); + } + + result + } - // Sort with a custom sorting function. - pub fn sort_via(mut a: Self, ordering: fn[Env](T, T) -> bool) -> Self { - for i in 1 .. a.len() { + /// Returns the index of the elements in the array that would sort it, using the provided custom sorting function. + unconstrained fn get_sorting_index(self, ordering: fn[Env](T, T) -> bool) -> [u64; N] { + let mut result = [0; N]; + let mut a = self; + for i in 0..N { + result[i] = i; + } + for i in 1..N { for j in 0..i { if ordering(a[i], a[j]) { let old_a_j = a[j]; a[j] = a[i]; a[i] = old_a_j; + let old_j = result[j]; + result[j] = result[i]; + result[i] = old_j; } } } - a + result } // Converts an array into a slice. @@ -37,7 +67,7 @@ impl [T; N] { let first_elem = f(self[0]); let mut ret = [first_elem; N]; - for i in 1 .. self.len() { + for i in 1..self.len() { ret[i] = f(self[i]); } @@ -59,7 +89,7 @@ impl [T; N] { // element of the given array as its starting accumulator value. pub fn reduce(self, f: fn[Env](T, T) -> T) -> T { let mut accumulator = self[0]; - for i in 1 .. 
self.len() { + for i in 1..self.len() { accumulator = f(accumulator, self[i]); } accumulator @@ -83,3 +113,15 @@ impl [T; N] { ret } } + +// helper function used to look up the position of a value in an array of Field +// Note that function returns 0 if the value is not found +unconstrained fn find_index(a: [u64; N], find: u64) -> u64 { + let mut result = 0; + for i in 0..a.len() { + if a[i] == find { + result = i; + } + } + result +} diff --git a/noir_stdlib/src/bigint.nr b/noir_stdlib/src/bigint.nr index 14790f69241..98237a54779 100644 --- a/noir_stdlib/src/bigint.nr +++ b/noir_stdlib/src/bigint.nr @@ -1,4 +1,18 @@ -use crate::ops::{Add, Sub, Mul, Div, Rem,}; +use crate::ops::{Add, Sub, Mul, Div}; +use crate::cmp::Eq; + +global bn254_fq = [0x47, 0xFD, 0x7C, 0xD8, 0x16, 0x8C, 0x20, 0x3C, 0x8d, 0xca, 0x71, 0x68, 0x91, 0x6a, 0x81, 0x97, + 0x5d, 0x58, 0x81, 0x81, 0xb6, 0x45, 0x50, 0xb8, 0x29, 0xa0, 0x31, 0xe1, 0x72, 0x4e, 0x64, 0x30]; +global bn254_fr = [0x01, 0x00, 0x00, 0x00, 0x3F, 0x59, 0x1F, 0x43, 0x09, 0x97, 0xB9, 0x79, 0x48, 0xE8, 0x33, 0x28, + 0x5D, 0x58, 0x81, 0x81, 0xB6, 0x45, 0x50, 0xB8, 0x29, 0xA0, 0x31, 0xE1, 0x72, 0x4E, 0x64, 0x30]; +global secpk1_fr = [0x41, 0x41, 0x36, 0xD0, 0x8C, 0x5E, 0xD2, 0xBF, 0x3B, 0xA0, 0x48, 0xAF, 0xE6, 0xDC, 0xAE, 0xBA, + 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]; +global secpk1_fq = [0x2F, 0xFC, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]; +global secpr1_fq = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF]; +global secpr1_fr = [0x51, 0x25, 0x63, 0xFC, 0xC2, 0xCA, 0xB9, 0xF3, 0x84, 0x9E, 0x17, 0xA7, 0xAD, 0xFA, 0xE6, 0xBC, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,0xFF, 0xFF, 0xFF, 0xFF]; struct BigInt { pointer: u32, @@ -7,47 +21,330 @@ struct BigInt { impl BigInt { #[builtin(bigint_add)] - pub fn bigint_add(_self: Self, _other: BigInt) -> BigInt { - } - #[builtin(bigint_neg)] - pub fn bigint_neg(_self: Self, _other: BigInt) -> BigInt { - } + fn bigint_add(self, other: BigInt) -> BigInt {} + #[builtin(bigint_sub)] + fn bigint_sub(self, other: BigInt) -> BigInt {} #[builtin(bigint_mul)] - pub fn bigint_mul(_self: Self, _other: BigInt) -> BigInt { - } + fn bigint_mul(self, other: BigInt) -> BigInt {} #[builtin(bigint_div)] - pub fn bigint_div(_self: Self, _other: BigInt) -> BigInt { - } + fn bigint_div(self, other: BigInt) -> BigInt {} #[builtin(bigint_from_le_bytes)] - pub fn from_le_bytes(_bytes: [u8], _modulus: [u8]) -> BigInt {} + fn from_le_bytes(bytes: [u8], modulus: [u8]) -> BigInt {} #[builtin(bigint_to_le_bytes)] - pub fn to_le_bytes(_self: Self) -> [u8] {} + fn to_le_bytes(self) -> [u8] {} + + fn check_32_bytes(self: Self, other: BigInt) -> bool { + let bytes = self.to_le_bytes(); + let o_bytes = other.to_le_bytes(); + let mut result = true; + for i in 0..32 { + result = result & (bytes[i] == o_bytes[i]); + } + result + } } -impl Add for BigInt { - fn add(self: Self, other: BigInt) -> BigInt { - self.bigint_add(other) +trait BigField { + fn from_le_bytes(bytes: [u8]) -> Self; + fn to_le_bytes(self) -> [u8]; +} + +struct Secpk1Fq { + inner: BigInt, +} + +impl BigField for Secpk1Fq { + fn from_le_bytes(bytes: [u8]) -> Secpk1Fq { + Secpk1Fq { + inner: BigInt::from_le_bytes(bytes, 
secpk1_fq) + } + } + fn to_le_bytes(self) -> [u8] { + self.inner.to_le_bytes() } } -impl Sub for BigInt { - fn sub(self: Self, other: BigInt) -> BigInt { - self.bigint_neg(other) + +impl Add for Secpk1Fq { + fn add(self: Self, other: Secpk1Fq) -> Secpk1Fq { + Secpk1Fq { + inner: self.inner.bigint_add(other.inner) + } } } -impl Mul for BigInt { - fn mul(self: Self, other: BigInt) -> BigInt { - self.bigint_mul(other) +impl Sub for Secpk1Fq { + fn sub(self: Self, other: Secpk1Fq) -> Secpk1Fq { + Secpk1Fq { + inner: self.inner.bigint_sub(other.inner) + } } } -impl Div for BigInt { - fn div(self: Self, other: BigInt) -> BigInt { - self.bigint_div(other) +impl Mul for Secpk1Fq { + fn mul(self: Self, other: Secpk1Fq) -> Secpk1Fq { + Secpk1Fq { + inner: self.inner.bigint_mul(other.inner) + } + + } +} +impl Div for Secpk1Fq { + fn div(self: Self, other: Secpk1Fq) -> Secpk1Fq { + Secpk1Fq { + inner: self.inner.bigint_div(other.inner) + } + } +} +impl Eq for Secpk1Fq { + fn eq(self: Self, other: Secpk1Fq) -> bool { + self.inner.check_32_bytes(other.inner) + } +} + +struct Secpk1Fr { + inner: BigInt, +} + +impl BigField for Secpk1Fr { + fn from_le_bytes(bytes: [u8]) -> Secpk1Fr { + Secpk1Fr { + inner: BigInt::from_le_bytes(bytes, secpk1_fr) + } + } + fn to_le_bytes(self) -> [u8] { + self.inner.to_le_bytes() + } +} + +impl Add for Secpk1Fr { + fn add(self: Self, other: Secpk1Fr) -> Secpk1Fr { + Secpk1Fr { + inner: self.inner.bigint_add(other.inner) + } } } -impl Rem for BigInt { - fn rem(self: Self, other: BigInt) -> BigInt { - let quotient = self.bigint_div(other); - self.bigint_neg(quotient.bigint_mul(other)) +impl Sub for Secpk1Fr { + fn sub(self: Self, other: Secpk1Fr) -> Secpk1Fr { + Secpk1Fr { + inner: self.inner.bigint_sub(other.inner) + } } } +impl Mul for Secpk1Fr { + fn mul(self: Self, other: Secpk1Fr) -> Secpk1Fr { + Secpk1Fr { + inner: self.inner.bigint_mul(other.inner) + } + } +} +impl Div for Secpk1Fr { + fn div(self: Self, other: Secpk1Fr) -> Secpk1Fr { + Secpk1Fr { + inner: self.inner.bigint_div(other.inner) + } + } +} +impl Eq for Secpk1Fr { + fn eq(self: Self, other: Secpk1Fr) -> bool { + self.inner.check_32_bytes(other.inner) + } +} + +struct Bn254Fr { + inner: BigInt, +} + +impl BigField for Bn254Fr { + fn from_le_bytes(bytes: [u8]) -> Bn254Fr { + Bn254Fr { + inner: BigInt::from_le_bytes(bytes, bn254_fr) + } + } + fn to_le_bytes(self) -> [u8] { + self.inner.to_le_bytes() + } +} + +impl Add for Bn254Fr { + fn add(self: Self, other: Bn254Fr) -> Bn254Fr { + Bn254Fr { + inner: self.inner.bigint_add(other.inner) + } + } +} +impl Sub for Bn254Fr { + fn sub(self: Self, other: Bn254Fr) -> Bn254Fr { + Bn254Fr { + inner: self.inner.bigint_sub(other.inner) + } + } +} +impl Mul for Bn254Fr { + fn mul(self: Self, other: Bn254Fr) -> Bn254Fr { + Bn254Fr { + inner: self.inner.bigint_mul(other.inner) + } + + } +} +impl Div for Bn254Fr { + fn div(self: Self, other: Bn254Fr) -> Bn254Fr { + Bn254Fr { + inner: self.inner.bigint_div(other.inner) + } + } +} +impl Eq for Bn254Fr { + fn eq(self: Self, other: Bn254Fr) -> bool { + self.inner.check_32_bytes(other.inner) + } +} + +struct Bn254Fq { + inner: BigInt, +} + +impl BigField for Bn254Fq { + fn from_le_bytes(bytes: [u8]) -> Bn254Fq { + Bn254Fq { + inner: BigInt::from_le_bytes(bytes, bn254_fq) + } + } + fn to_le_bytes(self) -> [u8] { + self.inner.to_le_bytes() + } +} + +impl Add for Bn254Fq { + fn add(self: Self, other: Bn254Fq) -> Bn254Fq { + Bn254Fq { + inner: self.inner.bigint_add(other.inner) + } + } +} +impl Sub for Bn254Fq { + fn sub(self: Self, 
other: Bn254Fq) -> Bn254Fq { + Bn254Fq { + inner: self.inner.bigint_sub(other.inner) + } + } +} +impl Mul for Bn254Fq { + fn mul(self: Self, other: Bn254Fq) -> Bn254Fq { + Bn254Fq { + inner: self.inner.bigint_mul(other.inner) + } + + } +} +impl Div for Bn254Fq { + fn div(self: Self, other: Bn254Fq) -> Bn254Fq { + Bn254Fq { + inner: self.inner.bigint_div(other.inner) + } + } +} +impl Eq for Bn254Fq { + fn eq(self: Self, other: Bn254Fq) -> bool { + self.inner.check_32_bytes(other.inner) + } +} + +struct Secpr1Fq { + inner: BigInt, +} + +impl BigField for Secpr1Fq { + fn from_le_bytes(bytes: [u8]) -> Secpr1Fq { + Secpr1Fq { + inner: BigInt::from_le_bytes(bytes, secpr1_fq) + } + } + fn to_le_bytes(self) -> [u8] { + self.inner.to_le_bytes() + } +} + +impl Add for Secpr1Fq { + fn add(self: Self, other: Secpr1Fq) -> Secpr1Fq { + Secpr1Fq { + inner: self.inner.bigint_add(other.inner) + } + } +} +impl Sub for Secpr1Fq { + fn sub(self: Self, other: Secpr1Fq) -> Secpr1Fq { + Secpr1Fq { + inner: self.inner.bigint_sub(other.inner) + } + } +} +impl Mul for Secpr1Fq { + fn mul(self: Self, other: Secpr1Fq) -> Secpr1Fq { + Secpr1Fq { + inner: self.inner.bigint_mul(other.inner) + } + + } +} +impl Div for Secpr1Fq { + fn div(self: Self, other: Secpr1Fq) -> Secpr1Fq { + Secpr1Fq { + inner: self.inner.bigint_div(other.inner) + } + } +} +impl Eq for Secpr1Fq { + fn eq(self: Self, other: Secpr1Fq) -> bool { + self.inner.check_32_bytes(other.inner) + } +} + +struct Secpr1Fr { + inner: BigInt, +} + +impl BigField for Secpr1Fr { + fn from_le_bytes(bytes: [u8]) -> Secpr1Fr { + Secpr1Fr { + inner: BigInt::from_le_bytes(bytes, secpr1_fr) + } + } + fn to_le_bytes(self) -> [u8] { + self.inner.to_le_bytes() + } +} + +impl Add for Secpr1Fr { + fn add(self: Self, other: Secpr1Fr) -> Secpr1Fr { + Secpr1Fr { + inner: self.inner.bigint_add(other.inner) + } + } +} +impl Sub for Secpr1Fr { + fn sub(self: Self, other: Secpr1Fr) -> Secpr1Fr { + Secpr1Fr { + inner: self.inner.bigint_sub(other.inner) + } + } +} +impl Mul for Secpr1Fr { + fn mul(self: Self, other: Secpr1Fr) -> Secpr1Fr { + Secpr1Fr { + inner: self.inner.bigint_mul(other.inner) + } + + } +} +impl Div for Secpr1Fr { + fn div(self: Self, other: Secpr1Fr) -> Secpr1Fr { + Secpr1Fr { + inner: self.inner.bigint_div(other.inner) + } + } +} +impl Eq for Secpr1Fr { + fn eq(self: Self, other: Secpr1Fr) -> bool { + self.inner.check_32_bytes(other.inner) + } +} diff --git a/noir_stdlib/src/cmp.nr b/noir_stdlib/src/cmp.nr index b3de3e2658e..38316e5d6a8 100644 --- a/noir_stdlib/src/cmp.nr +++ b/noir_stdlib/src/cmp.nr @@ -8,12 +8,10 @@ impl Eq for Field { fn eq(self, other: Field) -> bool { self == other } } impl Eq for u1 { fn eq(self, other: u1) -> bool { self == other } } impl Eq for u8 { fn eq(self, other: u8) -> bool { self == other } } -impl Eq for u16 { fn eq(self, other: u16) -> bool { self == other } } impl Eq for u32 { fn eq(self, other: u32) -> bool { self == other } } impl Eq for u64 { fn eq(self, other: u64) -> bool { self == other } } impl Eq for i8 { fn eq(self, other: i8) -> bool { self == other } } -impl Eq for i16 { fn eq(self, other: i16) -> bool { self == other } } impl Eq for i32 { fn eq(self, other: i32) -> bool { self == other } } impl Eq for i64 { fn eq(self, other: i64) -> bool { self == other } } @@ -111,18 +109,6 @@ impl Ord for u8 { } } -impl Ord for u16 { - fn cmp(self, other: u16) -> Ordering { - if self < other { - Ordering::less() - } else if self > other { - Ordering::greater() - } else { - Ordering::equal() - } - } -} - impl Ord for u32 { fn 
cmp(self, other: u32) -> Ordering { if self < other { @@ -159,18 +145,6 @@ impl Ord for i8 { } } -impl Ord for i16 { - fn cmp(self, other: i16) -> Ordering { - if self < other { - Ordering::less() - } else if self > other { - Ordering::greater() - } else { - Ordering::equal() - } - } -} - impl Ord for i32 { fn cmp(self, other: i32) -> Ordering { if self < other { diff --git a/noir_stdlib/src/collections.nr b/noir_stdlib/src/collections.nr index 177ca96816f..2d952f4d6cd 100644 --- a/noir_stdlib/src/collections.nr +++ b/noir_stdlib/src/collections.nr @@ -1,2 +1,3 @@ mod vec; mod bounded_vec; +mod map; diff --git a/noir_stdlib/src/collections/bounded_vec.nr b/noir_stdlib/src/collections/bounded_vec.nr index 332fefa63f9..752b96d6591 100644 --- a/noir_stdlib/src/collections/bounded_vec.nr +++ b/noir_stdlib/src/collections/bounded_vec.nr @@ -1,37 +1,35 @@ struct BoundedVec { storage: [T; MaxLen], - // TODO: change this to return a u64 as Noir now - // uses u64 for indexing - len: Field, - empty_value: T, + len: u64, } impl BoundedVec { - pub fn new(initial_value: T) -> Self { - BoundedVec { storage: [initial_value; MaxLen], len: 0, empty_value: initial_value } + pub fn new() -> Self { + let zeroed = crate::unsafe::zeroed(); + BoundedVec { storage: [zeroed; MaxLen], len: 0 } } - pub fn get(mut self: Self, index: Field) -> T { - assert(index as u64 < self.len as u64); + pub fn get(mut self: Self, index: u64) -> T { + assert(index as u64 < self.len); self.storage[index] } - pub fn get_unchecked(mut self: Self, index: Field) -> T { + pub fn get_unchecked(mut self: Self, index: u64) -> T { self.storage[index] } pub fn push(&mut self, elem: T) { - assert(self.len as u64 < MaxLen as u64, "push out of bounds"); + assert(self.len < MaxLen as u64, "push out of bounds"); self.storage[self.len] = elem; self.len += 1; } - pub fn len(self) -> Field { + pub fn len(self) -> u64 { self.len } - pub fn max_len(_self: BoundedVec) -> Field { + pub fn max_len(_self: BoundedVec) -> u64 { MaxLen } @@ -59,7 +57,7 @@ impl BoundedVec { for i in 0..Len { exceeded_len |= i == append_len; if !exceeded_len { - self.storage[self.len + (i as Field)] = vec.get_unchecked(i as Field); + self.storage[self.len + i] = vec.get_unchecked(i); } } self.len = new_len; @@ -70,7 +68,7 @@ impl BoundedVec { self.len -= 1; let elem = self.storage[self.len]; - self.storage[self.len] = self.empty_value; + self.storage[self.len] = crate::unsafe::zeroed(); elem } @@ -79,10 +77,10 @@ impl BoundedVec { let mut exceeded_len = false; for i in 0..MaxLen { exceeded_len |= i == self.len; - if (!exceeded_len) { + if !exceeded_len { ret |= predicate(self.storage[i]); } } ret } -} \ No newline at end of file +} diff --git a/noir_stdlib/src/collections/map.nr b/noir_stdlib/src/collections/map.nr new file mode 100644 index 00000000000..056299b4238 --- /dev/null +++ b/noir_stdlib/src/collections/map.nr @@ -0,0 +1,456 @@ +use crate::cmp::Eq; +use crate::collections::vec::Vec; +use crate::option::Option; +use crate::default::Default; +use crate::hash::{Hash, Hasher, BuildHasher}; + +// We use load factor α_max = 0.75. +// Upon exceeding it, assert will fail in order to inform the user +// about performance degradation, so that he can adjust the capacity. +global MAX_LOAD_FACTOR_NUMERATOR = 3; +global MAX_LOAD_FACTOR_DEN0MINATOR = 4; + +// Hash table with open addressing and quadratic probing. +// Size of the underlying table must be known at compile time. 
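// A minimal usage sketch, assuming the `BuildHasherDefault` wrapper and the
// `Poseidon2Hasher` exposed by the stdlib `hash` module (those names are an
// assumption here, not defined in this file):
//
//     use dep::std::collections::map::HashMap;
//     use dep::std::hash::BuildHasherDefault;
//     use dep::std::hash::poseidon2::Poseidon2Hasher;
//
//     fn main() {
//         // Capacity (8) is fixed at compile time; keep len() below 3/4 of it.
//         let mut map: HashMap<Field, u64, 8, BuildHasherDefault<Poseidon2Hasher>> = HashMap::default();
//         map.insert(1, 100);
//         map.insert(2, 200);
//         assert(map.get(1).unwrap() == 100);
//         assert(map.get(3).is_none());
//     }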
+// It is advised to select capacity N as a power of two, or a prime number +// because utilized probing scheme is best tailored for it. +struct HashMap { + _table: [Slot; N], + + // Amount of valid elements in the map. + _len: u64, + + _build_hasher: B +} + +// Data unit in the HashMap table. +// In case Noir adds support for enums in the future, this +// should be refactored to have three states: +// 1. (key, value) +// 2. (empty) +// 3. (deleted) +struct Slot { + _key_value: Option<(K, V)>, + _is_deleted: bool, +} + +impl Default for Slot{ + fn default() -> Self{ + Slot{ + _key_value: Option::none(), + _is_deleted: false + } + } +} + +impl Slot { + fn is_valid(self) -> bool { + !self._is_deleted & self._key_value.is_some() + } + + fn is_available(self) -> bool { + self._is_deleted | self._key_value.is_none() + } + + fn key_value(self) -> Option<(K, V)> { + self._key_value + } + + fn key_value_unchecked(self) -> (K, V) { + self._key_value.unwrap_unchecked() + } + + fn set(&mut self, key: K, value: V) { + self._key_value = Option::some((key, value)); + self._is_deleted = false; + } + + // Shall not override `_key_value` with Option::none(), + // because we must be able to differentiate empty + // and deleted slots for lookup. + fn mark_deleted(&mut self) { + self._is_deleted = true; + } +} + +// While conducting lookup, we iterate attempt from 0 to N - 1 due to heuristic, +// that if we have went that far without finding desired, +// it is very unlikely to be after - performance will be heavily degraded. +impl HashMap { + // Creates a new instance of HashMap with specified BuildHasher. + pub fn with_hasher(_build_hasher: B) -> Self + where + B: BuildHasher { + let _table = [Slot::default(); N]; + let _len = 0; + Self { _table, _len, _build_hasher } + } + + // Clears the map, removing all key-value entries. + pub fn clear(&mut self) { + self._table = [Slot::default(); N]; + self._len = 0; + } + + // Returns true if the map contains a value for the specified key. + pub fn contains_key( + self, + key: K + ) -> bool + where + K: Hash + Eq, + B: BuildHasher, + H: Hasher { + self.get(key).is_some() + } + + // Returns true if the map contains no elements. + pub fn is_empty(self) -> bool { + self._len == 0 + } + + // Get the Option<(K, V) array of valid entries + // with a length of map capacity. First len() elements + // are safe to unwrap_unchecked(), whilst remaining + // are guaranteed to be Option::none(). + // + // This design is reasoned by compile-time limitations and + // temporary nested slices ban. + pub fn entries(self) -> [Option<(K, V)>; N] { + let mut entries = [Option::none(); N]; + let mut valid_amount = 0; + + for slot in self._table { + if slot.is_valid() { + entries[valid_amount] = slot.key_value(); + valid_amount += 1; + } + } + + let msg = f"Amount of valid elements should have been {self._len} times, but got {valid_amount}."; + assert(valid_amount == self._len, msg); + + entries + } + + // Get the Option array of valid keys + // with a length of map capacity. First len() elements + // are safe to unwrap_unchecked(), whilst remaining + // are guaranteed to be Option::none(). + // + // This design is reasoned by compile-time limitations and + // temporary nested slices ban. 
+ pub fn keys(self) -> [Option; N] { + let mut keys = [Option::none(); N]; + let mut valid_amount = 0; + + for slot in self._table { + if slot.is_valid() { + let (key, _) = slot.key_value_unchecked(); + keys[valid_amount] = Option::some(key); + valid_amount += 1; + } + } + + let msg = f"Amount of valid elements should have been {self._len} times, but got {valid_amount}."; + assert(valid_amount == self._len, msg); + + keys + } + + // Get the Option array of valid values + // with a length of map capacity. First len() elements + // are safe to unwrap_unchecked(), whilst remaining + // are guaranteed to be Option::none(). + // + // This design is reasoned by compile-time limitations and + // temporary nested slices ban. + pub fn values(self) -> [Option; N] { + let mut values = [Option::none(); N]; + let mut valid_amount = 0; + + for slot in self._table { + if slot.is_valid() { + let (_, value) = slot.key_value_unchecked(); + values[valid_amount] = Option::some(value); + valid_amount += 1; + } + } + + let msg = f"Amount of valid elements should have been {self._len} times, but got {valid_amount}."; + assert(valid_amount == self._len, msg); + + values + } + + // For each key-value entry applies mutator function. + pub fn iter_mut( + &mut self, + f: fn(K, V) -> (K, V) + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { + let mut entries = self.entries(); + let mut new_map = HashMap::with_hasher(self._build_hasher); + + for i in 0..N { + if i < self._len { + let entry = entries[i].unwrap_unchecked(); + let (key, value) = f(entry.0, entry.1); + new_map.insert(key, value); + } + } + + self._table = new_map._table; + } + + // For each key applies mutator function. + pub fn iter_keys_mut( + &mut self, + f: fn(K) -> K + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { + let mut entries = self.entries(); + let mut new_map = HashMap::with_hasher(self._build_hasher); + + for i in 0..N { + if i < self._len { + let entry = entries[i].unwrap_unchecked(); + let (key, value) = (f(entry.0), entry.1); + new_map.insert(key, value); + } + } + + self._table = new_map._table; + } + + // For each value applies mutator function. + pub fn iter_values_mut(&mut self, f: fn(V) -> V) { + for i in 0..N { + let mut slot = self._table[i]; + if slot.is_valid() { + let (key, value) = slot.key_value_unchecked(); + slot.set(key, f(value)); + self._table[i] = slot; + } + } + } + + // Retains only the elements specified by the predicate. + pub fn retain(&mut self, f: fn(K, V) -> bool) { + for index in 0..N { + let mut slot = self._table[index]; + if slot.is_valid() { + let (key, value) = slot.key_value_unchecked(); + if !f(key, value) { + slot.mark_deleted(); + self._len -= 1; + self._table[index] = slot; + } + } + } + } + + // Amount of active key-value entries. + pub fn len(self) -> u64 { + self._len + } + + // Get the compile-time map capacity. + pub fn capacity(_self: Self) -> u64 { + N + } + + // Get the value by key. If it does not exist, returns none(). + pub fn get( + self, + key: K + ) -> Option + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { + let mut result = Option::none(); + + let hash = self.hash(key); + let mut break = false; + + for attempt in 0..N { + if !break { + let index = self.quadratic_probe(hash, attempt as u64); + let slot = self._table[index]; + + // Not marked as deleted and has key-value. 
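                // Deleted and empty slots are simply skipped: the scan only stops
                // early once a valid slot holding the requested key is found,
                // otherwise all N probe positions are visited and none() is returned.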
+ if slot.is_valid() { + let (current_key, value) = slot.key_value_unchecked(); + if current_key == key { + result = Option::some(value); + break = true; + } + } + } + } + + result + } + + // Insert key-value entry. In case key was already present, value is overridden. + pub fn insert( + &mut self, + key: K, + value: V + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { + self.assert_load_factor(); + + let hash = self.hash(key); + let mut break = false; + + for attempt in 0..N { + if !break { + let index = self.quadratic_probe(hash, attempt as u64); + let mut slot = self._table[index]; + let mut insert = false; + + // Either marked as deleted or has unset key-value. + if slot.is_available() { + insert = true; + self._len += 1; + } else { + let (current_key, _) = slot.key_value_unchecked(); + if current_key == key { + insert = true; + } + } + + if insert { + slot.set(key, value); + self._table[index] = slot; + break = true; + } + } + } + } + + // Remove key-value entry. If key is not present, HashMap remains unchanged. + pub fn remove( + &mut self, + key: K + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { + let hash = self.hash(key); + let mut break = false; + + for attempt in 0..N { + if !break { + let index = self.quadratic_probe(hash, attempt as u64); + let mut slot = self._table[index]; + + // Not marked as deleted and has key-value. + if slot.is_valid() { + let (current_key, _) = slot.key_value_unchecked(); + if current_key == key { + slot.mark_deleted(); + self._table[index] = slot; + self._len -= 1; + break = true; + } + } + } + } + } + + // Apply HashMap's hasher onto key to obtain pre-hash for probing. + fn hash( + self, + key: K + ) -> u64 + where + K: Hash, + B: BuildHasher, + H: Hasher { + let mut hasher = self._build_hasher.build_hasher(); + key.hash(&mut hasher); + hasher.finish() as u64 + } + + // Probing scheme: quadratic function. + // We use 0.5 constant near variadic attempt and attempt^2 monomials. + // This ensures good uniformity of distribution for table sizes + // equal to prime numbers or powers of two. + fn quadratic_probe(_self: Self, hash: u64, attempt: u64) -> u64 { + (hash + (attempt + attempt * attempt) / 2) % N + } + + // Amount of elements in the table in relation to available slots exceeds α_max. + // To avoid a comparatively more expensive division operation + // we conduct cross-multiplication instead. + // n / m >= MAX_LOAD_FACTOR_NUMERATOR / MAX_LOAD_FACTOR_DEN0MINATOR + // n * MAX_LOAD_FACTOR_DEN0MINATOR >= m * MAX_LOAD_FACTOR_NUMERATOR + fn assert_load_factor(self) { + let lhs = self._len * MAX_LOAD_FACTOR_DEN0MINATOR; + let rhs = self._table.len() * MAX_LOAD_FACTOR_NUMERATOR; + let exceeded = lhs >= rhs; + assert(!exceeded, "Load factor is exceeded, consider increasing the capacity."); + } +} + +// Equality class on HashMap has to test that they have +// equal sets of key-value entries, +// thus one is a subset of the other and vice versa. +impl Eq for HashMap +where + K: Eq + Hash, + V: Eq, + B: BuildHasher, + H: Hasher +{ + fn eq(self, other: HashMap) -> bool{ + let mut equal = false; + + if self.len() == other.len(){ + equal = true; + for slot in self._table{ + // Not marked as deleted and has key-value. 
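                // Entries are compared by looking each key up in `other` rather than
                // by comparing slots positionally: two equal maps can hold the same
                // entries in different slots after different insertion/deletion orders.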
+ if equal & slot.is_valid(){ + let (key, value) = slot.key_value_unchecked(); + let other_value = other.get(key); + + if other_value.is_none(){ + equal = false; + }else{ + let other_value = other_value.unwrap_unchecked(); + if value != other_value{ + equal = false; + } + } + } + } + } + + equal + } +} + +impl Default for HashMap +where + B: BuildHasher + Default, + H: Hasher + Default +{ + fn default() -> Self{ + let _build_hasher = B::default(); + let map: HashMap = HashMap::with_hasher(_build_hasher); + map + } +} diff --git a/noir_stdlib/src/collections/vec.nr b/noir_stdlib/src/collections/vec.nr index 43d68e1d1e7..deec98185ff 100644 --- a/noir_stdlib/src/collections/vec.nr +++ b/noir_stdlib/src/collections/vec.nr @@ -17,14 +17,14 @@ impl Vec { /// Get an element from the vector at the given index. /// Panics if the given index /// points beyond the end of the vector. - pub fn get(self, index: Field) -> T { + pub fn get(self, index: u64) -> T { self.slice[index] - } + } /// Push a new element to the end of the vector, returning a /// new vector with a length one greater than the /// original unmodified vector. - pub fn push(&mut self, elem: T) { + pub fn push(&mut self, elem: T) { self.slice = self.slice.push_back(elem); } @@ -32,7 +32,7 @@ impl Vec { /// a new vector with a length of one less than the given vector, /// as well as the popped element. /// Panics if the given vector's length is zero. - pub fn pop(&mut self) -> T { + pub fn pop(&mut self) -> T { let (popped_slice, last_elem) = self.slice.pop_back(); self.slice = popped_slice; last_elem @@ -40,20 +40,20 @@ impl Vec { /// Insert an element at a specified index, shifting all elements /// after it to the right - pub fn insert(&mut self, index: Field, elem: T) { + pub fn insert(&mut self, index: u64, elem: T) { self.slice = self.slice.insert(index, elem); - } + } /// Remove an element at a specified index, shifting all elements /// after it to the left, returning the removed element - pub fn remove(&mut self, index: Field) -> T { + pub fn remove(&mut self, index: u64) -> T { let (new_slice, elem) = self.slice.remove(index); self.slice = new_slice; elem } /// Returns the number of elements in the vector - pub fn len(self) -> Field { + pub fn len(self) -> u64 { self.slice.len() } } diff --git a/noir_stdlib/src/convert.nr b/noir_stdlib/src/convert.nr index 814f63f1cde..00ac0a0fd8c 100644 --- a/noir_stdlib/src/convert.nr +++ b/noir_stdlib/src/convert.nr @@ -24,37 +24,28 @@ impl Into for U where T: From { // docs:start:from-impls // Unsigned integers -impl From for u16 { fn from(value: u8) -> u16 { value as u16 } } impl From for u32 { fn from(value: u8) -> u32 { value as u32 } } -impl From for u32 { fn from(value: u16) -> u32 { value as u32 } } impl From for u64 { fn from(value: u8) -> u64 { value as u64 } } -impl From for u64 { fn from(value: u16) -> u64 { value as u64 } } impl From for u64 { fn from(value: u32) -> u64 { value as u64 } } impl From for Field { fn from(value: u8) -> Field { value as Field } } -impl From for Field { fn from(value: u16) -> Field { value as Field } } impl From for Field { fn from(value: u32) -> Field { value as Field } } impl From for Field { fn from(value: u64) -> Field { value as Field } } // Signed integers -impl From for i16 { fn from(value: i8) -> i16 { value as i16 } } impl From for i32 { fn from(value: i8) -> i32 { value as i32 } } -impl From for i32 { fn from(value: i16) -> i32 { value as i32 } } impl From for i64 { fn from(value: i8) -> i64 { value as i64 } } -impl From for i64 { fn 
from(value: i16) -> i64 { value as i64 } } impl From for i64 { fn from(value: i32) -> i64 { value as i64 } } // Booleans impl From for u8 { fn from(value: bool) -> u8 { value as u8 } } -impl From for u16 { fn from(value: bool) -> u16 { value as u16 } } impl From for u32 { fn from(value: bool) -> u32 { value as u32 } } impl From for u64 { fn from(value: bool) -> u64 { value as u64 } } impl From for i8 { fn from(value: bool) -> i8 { value as i8 } } -impl From for i16 { fn from(value: bool) -> i16 { value as i16 } } impl From for i32 { fn from(value: bool) -> i32 { value as i32 } } impl From for i64 { fn from(value: bool) -> i64 { value as i64 } } impl From for Field { fn from(value: bool) -> Field { value as Field } } diff --git a/noir_stdlib/src/default.nr b/noir_stdlib/src/default.nr index ba6412a834f..32c4f3f3b48 100644 --- a/noir_stdlib/src/default.nr +++ b/noir_stdlib/src/default.nr @@ -7,12 +7,10 @@ trait Default { impl Default for Field { fn default() -> Field { 0 } } impl Default for u8 { fn default() -> u8 { 0 } } -impl Default for u16 { fn default() -> u16 { 0 } } impl Default for u32 { fn default() -> u32 { 0 } } impl Default for u64 { fn default() -> u64 { 0 } } impl Default for i8 { fn default() -> i8 { 0 } } -impl Default for i16 { fn default() -> i16 { 0 } } impl Default for i32 { fn default() -> i32 { 0 } } impl Default for i64 { fn default() -> i64 { 0 } } diff --git a/noir_stdlib/src/ec/montcurve.nr b/noir_stdlib/src/ec/montcurve.nr index 83a17bae322..7dc756781c0 100644 --- a/noir_stdlib/src/ec/montcurve.nr +++ b/noir_stdlib/src/ec/montcurve.nr @@ -31,7 +31,7 @@ mod affine { impl Point { // Point constructor pub fn new(x: Field, y: Field) -> Self { - Self {x, y, infty: false} + Self { x, y, infty: false } } // Check if zero @@ -45,30 +45,30 @@ mod affine { curvegroup::Point::zero() } else { let (x,y) = (self.x, self.y); - curvegroup::Point::new(x,y,1) + curvegroup::Point::new(x, y, 1) } } // Additive identity pub fn zero() -> Self { - Self {x: 0, y: 0, infty: true} + Self { x: 0, y: 0, infty: true } } // Negation fn negate(self) -> Self { let Self {x, y, infty} = self; - Self {x, y: 0-y, infty} + Self { x, y: 0 - y, infty } } // Map into equivalent Twisted Edwards curve fn into_tecurve(self) -> TEPoint { let Self {x, y, infty} = self; - - if infty | (y*(x+1) == 0) { + + if infty | (y * (x + 1) == 0) { TEPoint::zero() } else { - TEPoint::new(x/y, (x-1)/(x+1)) + TEPoint::new(x / y, (x - 1) / (x + 1)) } } } @@ -84,9 +84,9 @@ mod affine { pub fn new(j: Field, k: Field, gen: Point) -> Self { // Check curve coefficients assert(k != 0); - assert(j*j != 4); + assert(j * j != 4); - let curve = Self {j, k, gen}; + let curve = Self { j, k, gen }; // gen should be on the curve assert(curve.contains(curve.gen)); @@ -103,8 +103,8 @@ mod affine { pub fn contains(self, p: Point) -> bool { let Self {j, k, gen: _gen} = self; let Point {x, y, infty: infty} = p; - - infty | (k*y*y == x*(x*x + j*x + 1)) + + infty | (k * y * y == x * (x * x + j * x + 1)) } // Point addition @@ -122,7 +122,7 @@ mod affine { fn mul(self, n: Field, p: Point) -> Point { self.into_tecurve().mul(n, p.into_tecurve()).into_montcurve() } - + // Multi-scalar multiplication (n[0]*p[0] + ... 
+ n[N]*p[N], where * denotes scalar multiplication) fn msm(self, n: [Field; N], p: [Point; N]) -> Point { let mut out = Point::zero(); @@ -142,15 +142,15 @@ mod affine { // Conversion to equivalent Twisted Edwards curve fn into_tecurve(self) -> TECurve { let Self {j, k, gen} = self; - TECurve::new((j+2)/k, (j-2)/k, gen.into_tecurve()) + TECurve::new((j + 2) / k, (j - 2) / k, gen.into_tecurve()) } // Conversion to equivalent Short Weierstraß curve pub fn into_swcurve(self) -> SWCurve { let j = self.j; let k = self.k; - let a0 = (3-j*j)/(3*k*k); - let b0 = (2*j*j*j - 9*j)/(27*k*k*k); + let a0 = (3 - j * j) / (3 * k * k); + let b0 = (2 * j * j * j - 9 * j) / (27 * k * k * k); SWCurve::new(a0, b0, self.map_into_swcurve(self.gen)) } @@ -160,8 +160,7 @@ mod affine { if p.is_zero() { SWPoint::zero() } else { - SWPoint::new((3*p.x + self.j)/(3*self.k), - p.y/self.k) + SWPoint::new((3 * p.x + self.j) / (3 * self.k), p.y / self.k) } } @@ -170,8 +169,8 @@ mod affine { let SWPoint {x, y, infty} = p; let j = self.j; let k = self.k; - - Point {x: (3*k*x - j)/3, y: y*k, infty} + + Point { x: (3 * k * x - j) / 3, y: y * k, infty } } // Elligator 2 map-to-curve method; see . @@ -179,18 +178,18 @@ mod affine { let j = self.j; let k = self.k; let z = ZETA; // Non-square Field element required for map - + // Check whether curve is admissible assert(j != 0); - let l = (j*j - 4)/(k*k); + let l = (j * j - 4) / (k * k); assert(l != 0); assert(is_square(l) == false); - let x1 = safe_inverse(1+z*u*u)*(0 - (j/k)); - - let gx1 = x1*x1*x1 + (j/k)*x1*x1 + x1/(k*k); - let x2 = 0 - x1 - (j/k); - let gx2 = x2*x2*x2 + (j/k)*x2*x2 + x2/(k*k); + let x1 = safe_inverse(1 + z * u * u) * (0 - (j / k)); + + let gx1 = x1 * x1 * x1 + (j / k) * x1 * x1 + x1 / (k * k); + let x2 = 0 - x1 - (j / k); + let gx2 = x2 * x2 * x2 + (j / k) * x2 * x2 + x2 / (k * k); let x = if is_square(gx1) { x1 } else { x2 }; @@ -202,13 +201,12 @@ mod affine { if y0.sgn0() == 0 { y0 } else { 0 - y0 } }; - Point::new(x*k, y*k) - + Point::new(x * k, y * k) } // SWU map-to-curve method (via rational map) fn swu_map(self, z: Field, u: Field) -> Point { - self.map_from_swcurve(self.into_swcurve().swu_map(z,u)) + self.map_from_swcurve(self.into_swcurve().swu_map(z, u)) } } } @@ -240,7 +238,7 @@ mod curvegroup { impl Point { // Point constructor pub fn new(x: Field, y: Field, z: Field) -> Self { - Self {x, y, z} + Self { x, y, z } } // Check if zero @@ -254,20 +252,20 @@ mod curvegroup { affine::Point::zero() } else { let (x,y,z) = (self.x, self.y, self.z); - affine::Point::new(x/z, y/z) + affine::Point::new(x / z, y / z) } } // Additive identity pub fn zero() -> Self { - Self {x: 0, y: 1,z: 0} + Self { x: 0, y: 1, z: 0 } } // Negation fn negate(self) -> Self { let Self {x, y, z} = self; - Point::new(x, 0-y, z) + Point::new(x, 0 - y, z) } // Map into equivalent Twisted Edwards curve @@ -287,9 +285,9 @@ mod curvegroup { pub fn new(j: Field, k: Field, gen: Point) -> Self { // Check curve coefficients assert(k != 0); - assert(j*j != 4); + assert(j * j != 4); - let curve = Self {j, k, gen}; + let curve = Self { j, k, gen }; // gen should be on the curve assert(curve.contains(curve.gen)); @@ -306,8 +304,8 @@ mod curvegroup { pub fn contains(self, p: Point) -> bool { let Self {j, k, gen: _gen} = self; let Point {x, y, z} = p; - - k*y*y*z == x*(x*x + j*x*z + z*z) + + k * y * y * z == x * (x * x + j * x * z + z * z) } // Point addition @@ -320,12 +318,12 @@ mod curvegroup { fn bit_mul(self, bits: [u1; N], p: Point) -> Point { self.into_tecurve().bit_mul(bits, 
p.into_tecurve()).into_montcurve() } - + // Scalar multiplication (p + ... + p n times) pub fn mul(self, n: Field, p: Point) -> Point { self.into_tecurve().mul(n, p.into_tecurve()).into_montcurve() } - + // Multi-scalar multiplication (n[0]*p[0] + ... + n[N]*p[N], where * denotes scalar multiplication) fn msm(self, n: [Field; N], p: [Point; N]) -> Point { let mut out = Point::zero(); @@ -345,18 +343,17 @@ mod curvegroup { // Conversion to equivalent Twisted Edwards curve fn into_tecurve(self) -> TECurve { let Self {j, k, gen} = self; - TECurve::new((j+2)/k, (j-2)/k, gen.into_tecurve()) + TECurve::new((j + 2) / k, (j - 2) / k, gen.into_tecurve()) } // Conversion to equivalent Short Weierstraß curve fn into_swcurve(self) -> SWCurve { let j = self.j; let k = self.k; - let a0 = (3-j*j)/(3*k*k); - let b0 = (2*j*j*j - 9*j)/(27*k*k*k); + let a0 = (3 - j * j) / (3 * k * k); + let b0 = (2 * j * j * j - 9 * j) / (27 * k * k * k); - SWCurve::new(a0, b0, - self.map_into_swcurve(self.gen)) + SWCurve::new(a0, b0, self.map_into_swcurve(self.gen)) } // Point mapping into equivalent Short Weierstraß curve @@ -373,10 +370,10 @@ mod curvegroup { fn elligator2_map(self, u: Field) -> Point { self.into_affine().elligator2_map(u).into_group() } - + // SWU map-to-curve method (via rational map) fn swu_map(self, z: Field, u: Field) -> Point { - self.into_affine().swu_map(z,u).into_group() + self.into_affine().swu_map(z, u).into_group() } } } diff --git a/noir_stdlib/src/ec/swcurve.nr b/noir_stdlib/src/ec/swcurve.nr index e64f5a7be02..9dd324f3085 100644 --- a/noir_stdlib/src/ec/swcurve.nr +++ b/noir_stdlib/src/ec/swcurve.nr @@ -27,14 +27,14 @@ mod affine { impl Point { // Point constructor pub fn new(x: Field, y: Field) -> Self { - Self {x, y, infty: false} + Self { x, y, infty: false } } // Check if zero pub fn is_zero(self) -> bool { self.eq(Point::zero()) } - + // Conversion to CurveGroup coordinates fn into_group(self) -> curvegroup::Point { let Self {x, y, infty} = self; @@ -45,16 +45,16 @@ mod affine { curvegroup::Point::new(x, y, 1) } } - + // Additive identity pub fn zero() -> Self { - Self {x: 0, y: 0, infty: true} + Self { x: 0, y: 0, infty: true } } - + // Negation fn negate(self) -> Self { let Self {x, y, infty} = self; - Self {x, y: 0-y, infty} + Self { x, y: 0 - y, infty } } } @@ -72,8 +72,8 @@ mod affine { // Curve constructor pub fn new(a: Field, b: Field, gen: Point) -> Curve { // Check curve coefficients - assert(4*a*a*a + 27*b*b != 0); - + assert(4 * a * a * a + 27 * b * b != 0); + let curve = Curve { a, b, gen }; // gen should be on the curve @@ -85,16 +85,16 @@ mod affine { // Conversion to CurveGroup coordinates fn into_group(self) -> curvegroup::Curve { let Curve{a, b, gen} = self; - - curvegroup::Curve {a, b, gen: gen.into_group()} + + curvegroup::Curve { a, b, gen: gen.into_group() } } // Membership check pub fn contains(self, p: Point) -> bool { let Point {x, y, infty} = p; - infty | (y*y == x*x*x + self.a*x + self.b) + infty | (y * y == x * x * x + self.a * x + self.b) } - + // Point addition, implemented in terms of mixed addition for reasons of efficiency pub fn add(self, p1: Point, p2: Point) -> Point { self.mixed_add(p1, p2.into_group()).into_affine() @@ -109,9 +109,9 @@ mod affine { } else { let Point {x: x1, y: y1, infty: _inf} = p1; let curvegroup::Point {x: x2, y: y2, z: z2} = p2; - let you1 = x1*z2*z2; + let you1 = x1 * z2 * z2; let you2 = x2; - let s1 = y1*z2*z2*z2; + let s1 = y1 * z2 * z2 * z2; let s2 = y2; if you1 == you2 { @@ -120,15 +120,14 @@ mod affine { } else { 
self.into_group().double(p2) } - } else - { + } else { let h = you2 - you1; let r = s2 - s1; - let x3 = r*r - h*h*h - 2*you1*h*h; - let y3 = r*(you1*h*h - x3) - s1*h*h*h; - let z3 = h*z2; + let x3 = r * r - h * h * h - 2 * you1 * h * h; + let y3 = r * (you1 * h * h - x3) - s1 * h * h * h; + let z3 = h * z2; - curvegroup::Point::new(x3,y3,z3) + curvegroup::Point::new(x3, y3, z3) } } } @@ -138,7 +137,7 @@ mod affine { fn bit_mul(self, bits: [u1; N], p: Point) -> Point { self.into_group().bit_mul(bits, p.into_group()).into_affine() } - + // Scalar multiplication (p + ... + p n times) pub fn mul(self, n: Field, p: Point) -> Point { self.into_group().mul(n, p.into_group()).into_affine() @@ -165,17 +164,25 @@ mod affine { // where g(x) = x^3 + a*x + b. swu_map(c,z,.) then maps a Field element to a point on curve c. fn swu_map(self, z: Field, u: Field) -> Point { // Check whether curve is admissible - assert(self.a*self.b != 0); - + assert(self.a * self.b != 0); + let Curve {a, b, gen: _gen} = self; - - let tv1 = safe_inverse(z*z*u*u*u*u + u*u*z); - let x1 = if tv1 == 0 {b/(z*a)} else {(0-b/a)*(1 + tv1)}; - let gx1 = x1*x1*x1 + a*x1 + b; - let x2 = z*u*u*x1; - let gx2 = x2*x2*x2 + a*x2 + b; - let (x,y) = if is_square(gx1) {(x1, sqrt(gx1))} else {(x2, sqrt(gx2))}; - Point::new(x, if u.sgn0() != y.sgn0() {0-y} else {y}) + + let tv1 = safe_inverse(z * z * u * u * u * u + u * u * z); + let x1 = if tv1 == 0 { + b / (z * a) + } else { + (0 - b / a) * (1 + tv1) + }; + let gx1 = x1 * x1 * x1 + a * x1 + b; + let x2 = z * u * u * x1; + let gx2 = x2 * x2 * x2 + a * x2 + b; + let (x,y) = if is_square(gx1) { + (x1, sqrt(gx1)) + } else { + (x2, sqrt(gx2)) + }; + Point::new(x, if u.sgn0() != y.sgn0() { 0 - y } else { y }) } } } @@ -205,14 +212,14 @@ mod curvegroup { impl Point { // Point constructor pub fn new(x: Field, y: Field, z: Field) -> Self { - Self {x, y, z} + Self { x, y, z } } // Check if zero pub fn is_zero(self) -> bool { self.eq(Point::zero()) } - + // Conversion to affine coordinates pub fn into_affine(self) -> affine::Point { let Self {x, y, z} = self; @@ -220,20 +227,19 @@ mod curvegroup { if z == 0 { affine::Point::zero() } else { - affine::Point::new(x/(z*z), y/(z*z*z)) + affine::Point::new(x / (z * z), y / (z * z * z)) } } // Additive identity pub fn zero() -> Self { - Self {x: 0, y: 0, z: 0} + Self { x: 0, y: 0, z: 0 } } - - + // Negation fn negate(self) -> Self { let Self {x, y, z} = self; - Self {x, y: 0-y, z} + Self { x, y: 0 - y, z } } } @@ -250,8 +256,8 @@ mod curvegroup { // Curve constructor pub fn new(a: Field, b: Field, gen: Point) -> Curve { // Check curve coefficients - assert(4*a*a*a + 27*b*b != 0); - + assert(4 * a * a * a + 27 * b * b != 0); + let curve = Curve { a, b, gen }; // gen should be on the curve @@ -264,7 +270,7 @@ mod curvegroup { pub fn into_affine(self) -> affine::Curve { let Curve{a, b, gen} = self; - affine::Curve {a, b, gen: gen.into_affine()} + affine::Curve { a, b, gen: gen.into_affine() } } // Membership check @@ -273,13 +279,12 @@ mod curvegroup { if z == 0 { true } else { - y*y == x*x*x + self.a*x*z*z*z*z + self.b*z*z*z*z*z*z + y * y == x * x * x + self.a * x * z * z * z * z + self.b * z * z * z * z * z * z } } - + // Addition pub fn add(self, p1: Point, p2: Point) -> Point { - if p1.is_zero() { p2 } else if p2.is_zero() { @@ -287,10 +292,10 @@ mod curvegroup { } else { let Point {x: x1, y: y1, z: z1} = p1; let Point {x: x2, y: y2, z: z2} = p2; - let you1 = x1*z2*z2; - let you2 = x2*z1*z1; - let s1 = y1*z2*z2*z2; - let s2 = y2*z1*z1*z1; + let you1 = x1 * z2 
* z2; + let you2 = x2 * z1 * z1; + let s1 = y1 * z2 * z2 * z2; + let s2 = y2 * z1 * z1 * z1; if you1 == you2 { if s1 != s2 { @@ -301,11 +306,11 @@ mod curvegroup { } else { let h = you2 - you1; let r = s2 - s1; - let x3 = r*r - h*h*h - 2*you1*h*h; - let y3 = r*(you1*h*h - x3) - s1*h*h*h; - let z3 = h*z1*z2; + let x3 = r * r - h * h * h - 2 * you1 * h * h; + let y3 = r * (you1 * h * h - x3) - s1 * h * h * h; + let z3 = h * z1 * z2; - Point::new(x3,y3,z3) + Point::new(x3, y3, z3) } } } @@ -313,19 +318,19 @@ mod curvegroup { // Point doubling pub fn double(self, p: Point) -> Point { let Point {x, y, z} = p; - + if p.is_zero() { p } else if y == 0 { Point::zero() } else { - let s = 4*x*y*y; - let m = 3*x*x + self.a*z*z*z*z; - let x0 = m*m - 2*s; - let y0 = m*(s-x0) - 8*y*y*y*y; - let z0 = 2*y*z; + let s = 4 * x * y * y; + let m = 3 * x * x + self.a * z * z * z * z; + let x0 = m * m - 2 * s; + let y0 = m * (s - x0) - 8 * y * y * y * y; + let z0 = 2 * y * z; - Point::new(x0,y0,z0) + Point::new(x0, y0, z0) } } @@ -351,7 +356,7 @@ mod curvegroup { let mut n_as_bits: [u1; 254] = [0; 254]; let tmp = n.to_le_bits(N_BITS as u32); for i in 0..254 { - n_as_bits[i] = tmp[i]; + n_as_bits[i] = tmp[i]; } self.bit_mul(n_as_bits, p) @@ -375,7 +380,7 @@ mod curvegroup { // Simplified SWU map-to-curve method fn swu_map(self, z: Field, u: Field) -> Point { - self.into_affine().swu_map(z,u).into_group() + self.into_affine().swu_map(z, u).into_group() } } } diff --git a/noir_stdlib/src/ec/tecurve.nr b/noir_stdlib/src/ec/tecurve.nr index 5333ece4c4a..506fe89313a 100644 --- a/noir_stdlib/src/ec/tecurve.nr +++ b/noir_stdlib/src/ec/tecurve.nr @@ -40,18 +40,18 @@ mod affine { fn into_group(self) -> curvegroup::Point { let Self {x, y} = self; - curvegroup::Point::new(x, y, x*y, 1) + curvegroup::Point::new(x, y, x * y, 1) } // Additive identity pub fn zero() -> Self { - Point::new(0,1) + Point::new(0, 1) } // Negation fn negate(self) -> Self { let Self {x, y} = self; - Point::new(0-x, y) + Point::new(0 - x, y) } // Map into prime-order subgroup of equivalent Montgomery curve @@ -60,10 +60,10 @@ mod affine { MPoint::zero() } else { let Self {x, y} = self; - let x0 = (1+y)/(1-y); - let y0 = (1+y)/(x*(1-y)); + let x0 = (1 + y) / (1 - y); + let y0 = (1 + y) / (x * (1 - y)); - MPoint::new(x0,y0) + MPoint::new(x0, y0) } } } @@ -81,9 +81,9 @@ mod affine { // Curve constructor pub fn new(a: Field, d: Field, gen: Point) -> Curve { // Check curve coefficients - assert(a*d*(a-d) != 0); - - let curve = Curve {a, d, gen}; + assert(a * d * (a - d) != 0); + + let curve = Curve { a, d, gen }; // gen should be on the curve assert(curve.contains(curve.gen)); @@ -95,15 +95,15 @@ mod affine { fn into_group(self) -> curvegroup::Curve { let Curve{a, d, gen} = self; - curvegroup::Curve {a, d, gen: gen.into_group()} + curvegroup::Curve { a, d, gen: gen.into_group() } } - + // Membership check pub fn contains(self, p: Point) -> bool { let Point {x, y} = p; - self.a*x*x + y*y == 1 + self.d*x*x*y*y + self.a * x * x + y * y == 1 + self.d * x * x * y * y } - + // Point addition, implemented in terms of mixed addition for reasons of efficiency pub fn add(self, p1: Point, p2: Point) -> Point { self.mixed_add(p1, p2.into_group()).into_affine() @@ -114,20 +114,20 @@ mod affine { let Point{x: x1, y: y1} = p1; let curvegroup::Point{x: x2, y: y2, t: t2, z: z2} = p2; - let a = x1*x2; - let b = y1*y2; - let c = self.d*x1*y1*t2; - let e = (x1 + y1)*(x2 + y2) - a - b; + let a = x1 * x2; + let b = y1 * y2; + let c = self.d * x1 * y1 * t2; + let e = (x1 + y1) * 
(x2 + y2) - a - b; let f = z2 - c; let g = z2 + c; - let h = b - self.a*a; + let h = b - self.a * a; - let x = e*f; - let y = g*h; - let t = e*h; - let z = f*g; + let x = e * f; + let y = g * h; + let t = e * h; + let z = f * g; - curvegroup::Point::new(x,y,t,z) + curvegroup::Point::new(x, y, t, z) } // Scalar multiplication with scalar represented by a bit array (little-endian convention). @@ -135,7 +135,7 @@ mod affine { fn bit_mul(self, bits: [u1; N], p: Point) -> Point { self.into_group().bit_mul(bits, p.into_group()).into_affine() } - + // Scalar multiplication (p + ... + p n times) fn mul(self, n: Field, p: Point) -> Point { self.into_group().mul(n, p.into_group()).into_affine() @@ -159,10 +159,10 @@ mod affine { // Conversion to equivalent Montgomery curve pub fn into_montcurve(self) -> MCurve { - let j = 2*(self.a + self.d)/(self.a - self.d); - let k = 4/(self.a - self.d); + let j = 2 * (self.a + self.d) / (self.a - self.d); + let k = 4 / (self.a - self.d); let gen_montcurve = self.gen.into_montcurve(); - + MCurve::new(j, k, gen_montcurve) } @@ -188,7 +188,7 @@ mod affine { // Simplified SWU map-to-curve method (via rational map) fn swu_map(self, z: Field, u: Field) -> Point { - self.into_montcurve().swu_map(z,u).into_tecurve() + self.into_montcurve().swu_map(z, u).into_tecurve() } } } @@ -222,7 +222,7 @@ mod curvegroup { impl Point { // Point constructor pub fn new(x: Field, y: Field, t: Field, z: Field) -> Self { - Self {x, y, t, z} + Self { x, y, t, z } } // Check if zero @@ -235,19 +235,19 @@ mod curvegroup { pub fn into_affine(self) -> affine::Point { let Self {x, y, t: _t, z} = self; - affine::Point::new(x/z, y/z) + affine::Point::new(x / z, y / z) } // Additive identity pub fn zero() -> Self { - Point::new(0,1,0,1) + Point::new(0, 1, 0, 1) } // Negation fn negate(self) -> Self { let Self {x, y, t, z} = self; - Point::new(0-x, y, 0-t, z) + Point::new(0 - x, y, 0 - t, z) } // Map into prime-order subgroup of equivalent Montgomery curve @@ -269,8 +269,8 @@ mod curvegroup { // Curve constructor pub fn new(a: Field, d: Field, gen: Point) -> Curve { // Check curve coefficients - assert(a*d*(a-d) != 0); - + assert(a * d * (a - d) != 0); + let curve = Curve { a, d, gen }; // gen should be on the curve @@ -283,14 +283,16 @@ mod curvegroup { pub fn into_affine(self) -> affine::Curve { let Curve{a, d, gen} = self; - affine::Curve {a, d, gen: gen.into_affine()} + affine::Curve { a, d, gen: gen.into_affine() } } // Membership check pub fn contains(self, p: Point) -> bool { let Point {x, y, t, z} = p; - (z != 0) & (z*t == x*y) & (z*z*(self.a*x*x + y*y) == z*z*z*z + self.d*x*x*y*y) + (z != 0) + & (z * t == x * y) + & (z * z * (self.a * x * x + y * y) == z * z * z * z + self.d * x * x * y * y) } // Point addition @@ -298,40 +300,40 @@ mod curvegroup { let Point{x: x1, y: y1, t: t1, z: z1} = p1; let Point{x: x2, y: y2, t: t2, z: z2} = p2; - let a = x1*x2; - let b = y1*y2; - let c = self.d*t1*t2; - let d = z1*z2; - let e = (x1 + y1)*(x2 + y2) - a - b; + let a = x1 * x2; + let b = y1 * y2; + let c = self.d * t1 * t2; + let d = z1 * z2; + let e = (x1 + y1) * (x2 + y2) - a - b; let f = d - c; let g = d + c; - let h = b - self.a*a; + let h = b - self.a * a; - let x = e*f; - let y = g*h; - let t = e*h; - let z = f*g; + let x = e * f; + let y = g * h; + let t = e * h; + let z = f * g; - Point::new(x,y,t,z) + Point::new(x, y, t, z) } // Point doubling, cf. 
§3.3 pub fn double(self, p: Point) -> Point { let Point{x, y, t: _t, z} = p; - let a = x*x; - let b = y*y; - let c = 2*z*z; - let d = self.a*a; - let e = (x + y)*(x + y) - a - b; + let a = x * x; + let b = y * y; + let c = 2 * z * z; + let d = self.a * a; + let e = (x + y) * (x + y) - a - b; let g = d + b; let f = g - c; let h = d - b; - let x0 = e*f; - let y0 = g*h; - let t0 = e*h; - let z0 = f*g; + let x0 = e * f; + let y0 = g * h; + let t0 = e * h; + let z0 = f * g; Point::new(x0, y0, t0, z0) } @@ -340,7 +342,7 @@ mod curvegroup { // If k is the natural number represented by `bits`, then this computes p + ... + p k times. fn bit_mul(self, bits: [u1; N], p: Point) -> Point { let mut out = Point::zero(); - + for i in 0..N { out = self.add( self.add(out, out), @@ -349,7 +351,7 @@ mod curvegroup { out } - + // Scalar multiplication (p + ... + p n times) pub fn mul(self, n: Field, p: Point) -> Point { let N_BITS = crate::field::modulus_num_bits(); @@ -358,7 +360,7 @@ mod curvegroup { let mut n_as_bits: [u1; 254] = [0; 254]; let tmp = n.to_le_bits(N_BITS as u32); for i in 0..254 { - n_as_bits[i] = tmp[i]; + n_as_bits[i] = tmp[i]; } self.bit_mul(n_as_bits, p) @@ -407,7 +409,7 @@ mod curvegroup { // Simplified SWU map-to-curve method (via rational map) fn swu_map(self, z: Field, u: Field) -> Point { - self.into_montcurve().swu_map(z,u).into_tecurve() + self.into_montcurve().swu_map(z, u).into_tecurve() } } } diff --git a/noir_stdlib/src/ecdsa_secp256k1.nr b/noir_stdlib/src/ecdsa_secp256k1.nr index 290ccba27e5..b72a1acd041 100644 --- a/noir_stdlib/src/ecdsa_secp256k1.nr +++ b/noir_stdlib/src/ecdsa_secp256k1.nr @@ -1,10 +1,10 @@ #[foreign(ecdsa_secp256k1)] // docs:start:ecdsa_secp256k1 pub fn verify_signature( - _public_key_x: [u8; 32], - _public_key_y: [u8; 32], - _signature: [u8; 64], - _message_hash: [u8; N] + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8; N] ) -> bool // docs:end:ecdsa_secp256k1 {} diff --git a/noir_stdlib/src/ecdsa_secp256r1.nr b/noir_stdlib/src/ecdsa_secp256r1.nr index 390f8ed39d2..ef92bf24ae4 100644 --- a/noir_stdlib/src/ecdsa_secp256r1.nr +++ b/noir_stdlib/src/ecdsa_secp256r1.nr @@ -1,10 +1,10 @@ #[foreign(ecdsa_secp256r1)] // docs:start:ecdsa_secp256r1 pub fn verify_signature( - _public_key_x: [u8; 32], - _public_key_y: [u8; 32], - _signature: [u8; 64], - _message_hash: [u8; N] + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8; N] ) -> bool // docs:end:ecdsa_secp256r1 {} diff --git a/noir_stdlib/src/field.nr b/noir_stdlib/src/field.nr index fbd76a1e8a2..0f4c2caffdf 100644 --- a/noir_stdlib/src/field.nr +++ b/noir_stdlib/src/field.nr @@ -6,20 +6,20 @@ impl Field { crate::assert_constant(bit_size); self.__to_le_bits(bit_size) } - + pub fn to_be_bits(self: Self, bit_size: u32) -> [u1] { crate::assert_constant(bit_size); self.__to_be_bits(bit_size) } #[builtin(to_le_bits)] - fn __to_le_bits(_self: Self, _bit_size: u32) -> [u1] {} - + fn __to_le_bits(self, _bit_size: u32) -> [u1] {} + #[builtin(to_be_bits)] - fn __to_be_bits(_self: Self, _bit_size: u32) -> [u1] {} + fn __to_be_bits(self, bit_size: u32) -> [u1] {} #[builtin(apply_range_constraint)] - fn __assert_max_bit_size(_self: Self, _bit_size: u32) {} + fn __assert_max_bit_size(self, bit_size: u32) {} pub fn assert_max_bit_size(self: Self, bit_size: u32) { crate::assert_constant(bit_size); @@ -35,7 +35,6 @@ impl Field { self.to_be_radix(256, byte_size) } - pub fn to_le_radix(self: Self, radix: u32, result_len: u32) -> [u8] { 
crate::assert_constant(radix); crate::assert_constant(result_len); @@ -48,16 +47,13 @@ impl Field { self.__to_be_radix(radix, result_len) } - - // decompose `_self` into a `_result_len` vector over the `_radix` basis // `_radix` must be less than 256 #[builtin(to_le_radix)] - fn __to_le_radix(_self: Self, _radix: u32, _result_len: u32) -> [u8] {} - - #[builtin(to_be_radix)] - fn __to_be_radix(_self: Self, _radix: u32, _result_len: u32) -> [u8] {} + fn __to_le_radix(self, radix: u32, result_len: u32) -> [u8] {} + #[builtin(to_be_radix)] + fn __to_be_radix(self, radix: u32, result_len: u32) -> [u8] {} // Returns self to the power of the given exponent value. // Caution: we assume the exponent fits into 32 bits @@ -85,11 +81,10 @@ impl Field { lt_fallback(self, another) } } - } #[builtin(modulus_num_bits)] -pub fn modulus_num_bits() -> Field {} +pub fn modulus_num_bits() -> u64 {} #[builtin(modulus_be_bits)] pub fn modulus_be_bits() -> [u1] {} diff --git a/noir_stdlib/src/field/bn254.nr b/noir_stdlib/src/field/bn254.nr index f6e23f8db0c..9e1445fd3ba 100644 --- a/noir_stdlib/src/field/bn254.nr +++ b/noir_stdlib/src/field/bn254.nr @@ -1,7 +1,10 @@ +// The low and high decomposition of the field modulus global PLO: Field = 53438638232309528389504892708671455233; global PHI: Field = 64323764613183177041862057485226039389; + global TWO_POW_128: Field = 0x100000000000000000000000000000000; +/// A hint for decomposing a single field into two 16 byte fields. unconstrained fn decompose_unsafe(x: Field) -> (Field, Field) { let x_bytes = x.to_le_bytes(32); @@ -18,14 +21,20 @@ unconstrained fn decompose_unsafe(x: Field) -> (Field, Field) { (low, high) } +/// Decompose a single field into two 16 byte fields. pub fn decompose(x: Field) -> (Field, Field) { + // Take hints of the decomposition let (xlo, xhi) = decompose_unsafe(x); let borrow = lt_unsafe(PLO, xlo, 16); + // Range check the limbs xlo.assert_max_bit_size(128); xhi.assert_max_bit_size(128); + // Check that the decomposition is correct assert_eq(x, xlo + TWO_POW_128 * xhi); + + // Check that (xlo < plo && xhi <= phi) || (xlo >= plo && xhi < phi) let rlo = PLO - xlo + (borrow as Field) * TWO_POW_128; let rhi = PHI - xhi - (borrow as Field); @@ -59,11 +68,13 @@ unconstrained fn lte_unsafe(x: Field, y: Field, num_bytes: u32) -> bool { } pub fn assert_gt(a: Field, b: Field) { + // Decompose a and b let (alo, ahi) = decompose(a); let (blo, bhi) = decompose(b); let borrow = lte_unsafe(alo, blo, 16); + // Assert that (alo > blo && ahi >= bhi) || (alo <= blo && ahi > bhi) let rlo = alo - blo - 1 + (borrow as Field) * TWO_POW_128; let rhi = ahi - bhi - (borrow as Field); diff --git a/noir_stdlib/src/hash.nr b/noir_stdlib/src/hash.nr index 4033e2a5365..fcf21436197 100644 --- a/noir_stdlib/src/hash.nr +++ b/noir_stdlib/src/hash.nr @@ -1,21 +1,25 @@ mod poseidon; mod mimc; +mod poseidon2; +mod pedersen; + +use crate::default::Default; #[foreign(sha256)] // docs:start:sha256 -pub fn sha256(_input: [u8; N]) -> [u8; 32] +pub fn sha256(input: [u8; N]) -> [u8; 32] // docs:end:sha256 {} #[foreign(blake2s)] // docs:start:blake2s -pub fn blake2s(_input: [u8; N]) -> [u8; 32] +pub fn blake2s(input: [u8; N]) -> [u8; 32] // docs:end:blake2s {} #[foreign(blake3)] // docs:start:blake3 -pub fn blake3(_input: [u8; N]) -> [u8; 32] +pub fn blake3(input: [u8; N]) -> [u8; 32] // docs:end:blake3 {} @@ -32,7 +36,7 @@ pub fn pedersen_commitment(input: [Field; N]) -> PedersenPoint } #[foreign(pedersen_commitment)] -pub fn __pedersen_commitment_with_separator(_input: [Field; N], 
_separator: u32) -> [Field; 2] {} +pub fn __pedersen_commitment_with_separator(input: [Field; N], separator: u32) -> [Field; 2] {} pub fn pedersen_commitment_with_separator(input: [Field; N], separator: u32) -> PedersenPoint { let values = __pedersen_commitment_with_separator(input, separator); @@ -47,13 +51,13 @@ pub fn pedersen_hash(input: [Field; N]) -> Field } #[foreign(pedersen_hash)] -pub fn pedersen_hash_with_separator(_input: [Field; N], _separator: u32) -> Field {} +pub fn pedersen_hash_with_separator(input: [Field; N], separator: u32) -> Field {} -pub fn hash_to_field(_input: [Field; N]) -> Field { +pub fn hash_to_field(input: [Field; N]) -> Field { let mut inputs_as_bytes = []; for i in 0..N { - let input_bytes = _input[i].to_le_bytes(32); + let input_bytes = input[i].to_le_bytes(32); for i in 0..32 { inputs_as_bytes = inputs_as_bytes.push_back(input_bytes[i]); } @@ -65,12 +69,61 @@ pub fn hash_to_field(_input: [Field; N]) -> Field { #[foreign(keccak256)] // docs:start:keccak256 -pub fn keccak256(_input: [u8; N], _message_size: u32) -> [u8; 32] +pub fn keccak256(input: [u8; N], message_size: u32) -> [u8; 32] // docs:end:keccak256 {} #[foreign(poseidon2_permutation)] -pub fn poseidon2_permutation(_input: [u8; N], _state_length: u32) -> [u8; N] {} +pub fn poseidon2_permutation(_input: [Field; N], _state_length: u32) -> [Field; N] {} #[foreign(sha256_compression)] pub fn sha256_compression(_input: [u32; 16], _state: [u32; 8]) -> [u32; 8] {} + +// Generic hashing support. +// Partially ported and impacted by rust. + +// Hash trait shall be implemented per type. +trait Hash{ + fn hash(self, state: &mut H) where H: Hasher; +} + +// Hasher trait shall be implemented by algorithms to provide hash-agnostic means. +// TODO: consider making the types generic here ([u8], [Field], etc.) +trait Hasher{ + fn finish(self) -> Field; + + fn write(&mut self, input: [Field]); +} + +// BuildHasher is a factory trait, responsible for production of specific Hasher. +trait BuildHasher where H: Hasher{ + fn build_hasher(self) -> H; +} + +struct BuildHasherDefault; + +impl BuildHasher for BuildHasherDefault +where + H: Hasher + Default +{ + fn build_hasher(_self: Self) -> H{ + H::default() + } +} + +impl Default for BuildHasherDefault +where + H: Hasher + Default +{ + fn default() -> Self{ + BuildHasherDefault{} + } +} + +// TODO: add implementations for the remainder of primitive types. 
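A rough usage sketch of how the Hasher, Hash and BuildHasher machinery above is meant to compose (not part of this diff; the helper functions and `use` paths are assumptions for illustration, and `PedersenHasher` is the concrete hasher added in `noir_stdlib/src/hash/pedersen.nr` further down — it is constructed directly here for brevity rather than through `BuildHasherDefault`):

    use dep::std::hash::{Hash, Hasher};
    use dep::std::hash::pedersen::PedersenHasher;

    // Hypothetical helper: hash two fields through a concrete hasher —
    // absorb with `write`, then read the digest with `finish`.
    fn hash_pair(left: Field, right: Field) -> Field {
        let mut hasher = PedersenHasher { _state: [] };
        let input: [Field] = [left, right];
        hasher.write(input);
        hasher.finish()
    }

    // The same digest written against the trait, so any Hasher implementation
    // can be swapped in; relies on the `impl Hash for Field` that follows below.
    fn hash_pair_with<H>(left: Field, right: Field, mut hasher: H) -> Field where H: Hasher {
        left.hash(&mut hasher);
        right.hash(&mut hasher);
        hasher.finish()
    }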
+impl Hash for Field{ + fn hash(self, state: &mut H) where H: Hasher{ + let input: [Field] = [self]; + H::write(state, input); + } +} diff --git a/noir_stdlib/src/hash/pedersen.nr b/noir_stdlib/src/hash/pedersen.nr new file mode 100644 index 00000000000..ace6851099d --- /dev/null +++ b/noir_stdlib/src/hash/pedersen.nr @@ -0,0 +1,24 @@ +use crate::hash::{Hasher, pedersen_hash}; +use crate::default::Default; + +struct PedersenHasher{ + _state: [Field] +} + +impl Hasher for PedersenHasher { + fn finish(self) -> Field { + pedersen_hash(self._state) + } + + fn write(&mut self, input: [Field]){ + self._state = self._state.append(input); + } +} + +impl Default for PedersenHasher{ + fn default() -> Self{ + PedersenHasher{ + _state: [] + } + } +} diff --git a/noir_stdlib/src/hash/poseidon.nr b/noir_stdlib/src/hash/poseidon.nr index 3f4de73c0db..b1a7c4a2367 100644 --- a/noir_stdlib/src/hash/poseidon.nr +++ b/noir_stdlib/src/hash/poseidon.nr @@ -21,7 +21,7 @@ pub fn config( // Input checks let mul = crate::wrapping_mul(t as u8, (rf + rp)); assert(mul == ark.len() as u8); - assert(t * t == mds.len()); + assert(t * t == mds.len() as Field); assert(alpha != 0); PoseidonConfig { t, rf, rp, alpha, ark, mds } @@ -30,7 +30,7 @@ pub fn config( fn permute(pos_conf: PoseidonConfig, mut state: [Field; O]) -> [Field; O] { let PoseidonConfig {t, rf, rp, alpha, ark, mds} = pos_conf; - assert(t == state.len()); + assert(t == state.len() as Field); let mut count = 0; // for r in 0..rf + rp @@ -47,7 +47,7 @@ fn permute(pos_conf: PoseidonConfig, mut state: [Field; O]) -> [F } state = apply_matrix(mds, state); // Apply MDS matrix - count = count + t; + count = count + t as u64; } state @@ -85,7 +85,7 @@ fn absorb( fn check_security(rate: Field, width: Field, security: Field) -> bool { let n = modulus_num_bits(); - ((n - 1) * (width - rate) / 2) as u8 > security as u8 + ((n - 1) as Field * (width - rate) / 2) as u8 > security as u8 } // A*x where A is an n x n matrix in row-major order and x an n-vector fn apply_matrix(a: [Field; M], x: [Field; N]) -> [Field; N] { diff --git a/noir_stdlib/src/hash/poseidon/bn254.nr b/noir_stdlib/src/hash/poseidon/bn254.nr index 0db6d9546dc..37b08e3c8fb 100644 --- a/noir_stdlib/src/hash/poseidon/bn254.nr +++ b/noir_stdlib/src/hash/poseidon/bn254.nr @@ -9,12 +9,12 @@ use crate::hash::poseidon::apply_matrix; #[field(bn254)] pub fn permute(pos_conf: PoseidonConfig, mut state: [Field; O]) -> [Field; O] { let PoseidonConfig {t, rf: config_rf, rp: config_rp, alpha, ark, mds} = pos_conf; - let rf = 8; - let rp = [56, 57, 56, 60, 60, 63, 64, 63, 60, 66, 60, 65, 70, 60, 64, 68][state.len() - 2]; + let rf: u8 = 8; + let rp: u8 = [56, 57, 56, 60, 60, 63, 64, 63, 60, 66, 60, 65, 70, 60, 64, 68][state.len() - 2]; - assert(t == state.len()); - assert(rf == config_rf as Field); - assert(rp == config_rp as Field); + assert(t == state.len() as Field); + assert(rf == config_rf); + assert(rp == config_rp); let mut count = 0; // First half of full rounds @@ -27,7 +27,7 @@ pub fn permute(pos_conf: PoseidonConfig, mut state: [Field; O]) - } state = apply_matrix(mds, state); // Apply MDS matrix - count = count + t; + count = count + t as u64; } // Partial rounds for _r in 0..rp { @@ -37,7 +37,7 @@ pub fn permute(pos_conf: PoseidonConfig, mut state: [Field; O]) - state[0] = state[0].pow_32(alpha); state = apply_matrix(mds, state); // Apply MDS matrix - count = count + t; + count = count + t as u64; } // Second half of full rounds for _r in 0..rf / 2 { @@ -49,7 +49,7 @@ pub fn permute(pos_conf: PoseidonConfig, 
mut state: [Field; O]) - } state = apply_matrix(mds, state); // Apply MDS matrix - count = count + t; + count = count + t as u64; } state diff --git a/noir_stdlib/src/hash/poseidon2.nr b/noir_stdlib/src/hash/poseidon2.nr new file mode 100644 index 00000000000..40eea029e82 --- /dev/null +++ b/noir_stdlib/src/hash/poseidon2.nr @@ -0,0 +1,113 @@ +global RATE = 3; + +struct Poseidon2 { + cache: [Field;3], + state: [Field;4], + cache_size: u32, + squeeze_mode: bool, // 0 => absorb, 1 => squeeze +} + +impl Poseidon2 { + + pub fn hash(input: [Field; N], message_size: u32) -> Field { + if message_size == N { + Poseidon2::hash_internal(input, N, false) + } else { + Poseidon2::hash_internal(input, message_size, true) + } + } + + fn new(iv: Field) -> Poseidon2 { + let mut result = Poseidon2 { cache: [0; 3], state: [0; 4], cache_size: 0, squeeze_mode: false }; + result.state[RATE] = iv; + result + } + + fn perform_duplex(&mut self) -> [Field; RATE] { + // zero-pad the cache + for i in 0..RATE { + if i >= self.cache_size { + self.cache[i] = 0; + } + } + // add the cache into sponge state + for i in 0..RATE { + self.state[i] += self.cache[i]; + } + self.state = crate::hash::poseidon2_permutation(self.state, 4); + // return `RATE` number of field elements from the sponge state. + let mut result = [0; RATE]; + for i in 0..RATE { + result[i] = self.state[i]; + } + result + } + + fn absorb(&mut self, input: Field) { + if (!self.squeeze_mode) & (self.cache_size == RATE) { + // If we're absorbing, and the cache is full, apply the sponge permutation to compress the cache + let _ = self.perform_duplex(); + self.cache[0] = input; + self.cache_size = 1; + } else if (!self.squeeze_mode) & (self.cache_size != RATE) { + // If we're absorbing, and the cache is not full, add the input into the cache + self.cache[self.cache_size] = input; + self.cache_size += 1; + } else if self.squeeze_mode { + // If we're in squeeze mode, switch to absorb mode and add the input into the cache. + // N.B. I don't think this code path can be reached?! + self.cache[0] = input; + self.cache_size = 1; + self.squeeze_mode = false; + } + } + + fn squeeze(&mut self) -> Field { + if self.squeeze_mode & (self.cache_size == 0) { + // If we're in squeze mode and the cache is empty, there is nothing left to squeeze out of the sponge! + // Switch to absorb mode. + self.squeeze_mode = false; + self.cache_size = 0; + } + if !self.squeeze_mode { + // If we're in absorb mode, apply sponge permutation to compress the cache, populate cache with compressed + // state and switch to squeeze mode. Note: this code block will execute if the previous `if` condition was + // matched + let new_output_elements = self.perform_duplex(); + self.squeeze_mode = true; + for i in 0..RATE { + self.cache[i] = new_output_elements[i]; + } + self.cache_size = RATE; + } + // By this point, we should have a non-empty cache. Pop one item off the top of the cache and return it. 
+ let result = self.cache[0]; + for i in 1..RATE { + if i < self.cache_size { + self.cache[i - 1] = self.cache[i]; + } + } + self.cache_size -= 1; + self.cache[self.cache_size] = 0; + result + } + + fn hash_internal(input: [Field; N], in_len: u32, is_variable_length: bool) -> Field { + let two_pow_64 = 18446744073709551616; + let iv : Field = (in_len as Field) * two_pow_64; + let mut sponge = Poseidon2::new(iv); + for i in 0..input.len() { + if i as u32 < in_len { + sponge.absorb(input[i]); + } + } + + // In the case where the hash preimage is variable-length, we append `1` to the end of the input, to distinguish + // from fixed-length hashes. (the combination of this additional field element + the hash IV ensures + // fixed-length and variable-length hashes do not collide) + if is_variable_length { + sponge.absorb(1); + } + sponge.squeeze() + } +} diff --git a/noir_stdlib/src/lib.nr b/noir_stdlib/src/lib.nr index 90aff3c312b..ebde4b88858 100644 --- a/noir_stdlib/src/lib.nr +++ b/noir_stdlib/src/lib.nr @@ -25,12 +25,12 @@ mod ops; mod default; mod prelude; mod uint128; -// mod bigint; +mod bigint; // Oracle calls are required to be wrapped in an unconstrained function // Thus, the only argument to the `println` oracle is expected to always be an ident #[oracle(print)] -unconstrained fn print_oracle(_with_newline: bool, _input: T) {} +unconstrained fn print_oracle(with_newline: bool, input: T) {} unconstrained pub fn print(input: T) { print_oracle(false, input); @@ -41,20 +41,20 @@ unconstrained pub fn println(input: T) { } #[foreign(recursive_aggregation)] -pub fn verify_proof(_verification_key: [Field], _proof: [Field], _public_inputs: [Field], _key_hash: Field) {} +pub fn verify_proof(verification_key: [Field], proof: [Field], public_inputs: [Field], key_hash: Field) {} // Asserts that the given value is known at compile-time. // Useful for debugging for-loop bounds. #[builtin(assert_constant)] -pub fn assert_constant(_x: T) {} +pub fn assert_constant(x: T) {} // from_field and as_field are private since they are not valid for every type. // `as` should be the default for users to cast between primitive types, and in the future // traits can be used to work with generic types. 
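A rough usage sketch for the Poseidon2 sponge introduced above (hypothetical program, not part of this diff; the import path follows from the new `mod poseidon2;` declaration in `hash.nr`). With `message_size == N` the fixed-length path is taken, so the IV is `N * 2^64` and no variable-length padding element is absorbed:

    use dep::std::hash::poseidon2::Poseidon2;

    fn main(a: Field, b: Field, c: Field) -> pub Field {
        // Fixed-length hash of exactly three field elements.
        Poseidon2::hash([a, b, c], 3)
    }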
#[builtin(from_field)] -fn from_field(_x: Field) -> T {} +fn from_field(x: Field) -> T {} #[builtin(as_field)] -fn as_field(_x: T) -> Field {} +fn as_field(x: T) -> Field {} pub fn wrapping_add(x: T, y: T) -> T { crate::from_field(crate::as_field(x) + crate::as_field(y)) diff --git a/noir_stdlib/src/ops.nr b/noir_stdlib/src/ops.nr index 50386290b8e..e561265629e 100644 --- a/noir_stdlib/src/ops.nr +++ b/noir_stdlib/src/ops.nr @@ -7,12 +7,10 @@ trait Add { impl Add for Field { fn add(self, other: Field) -> Field { self + other } } impl Add for u8 { fn add(self, other: u8) -> u8 { self + other } } -impl Add for u16 { fn add(self, other: u16) -> u16 { self + other } } impl Add for u32 { fn add(self, other: u32) -> u32 { self + other } } impl Add for u64 { fn add(self, other: u64) -> u64 { self + other } } impl Add for i8 { fn add(self, other: i8) -> i8 { self + other } } -impl Add for i16 { fn add(self, other: i16) -> i16 { self + other } } impl Add for i32 { fn add(self, other: i32) -> i32 { self + other } } impl Add for i64 { fn add(self, other: i64) -> i64 { self + other } } @@ -25,12 +23,10 @@ trait Sub { impl Sub for Field { fn sub(self, other: Field) -> Field { self - other } } impl Sub for u8 { fn sub(self, other: u8) -> u8 { self - other } } -impl Sub for u16 { fn sub(self, other: u16) -> u16 { self - other } } impl Sub for u32 { fn sub(self, other: u32) -> u32 { self - other } } impl Sub for u64 { fn sub(self, other: u64) -> u64 { self - other } } impl Sub for i8 { fn sub(self, other: i8) -> i8 { self - other } } -impl Sub for i16 { fn sub(self, other: i16) -> i16 { self - other } } impl Sub for i32 { fn sub(self, other: i32) -> i32 { self - other } } impl Sub for i64 { fn sub(self, other: i64) -> i64 { self - other } } @@ -43,12 +39,10 @@ trait Mul { impl Mul for Field { fn mul(self, other: Field) -> Field { self * other } } impl Mul for u8 { fn mul(self, other: u8) -> u8 { self * other } } -impl Mul for u16 { fn mul(self, other: u16) -> u16 { self * other } } impl Mul for u32 { fn mul(self, other: u32) -> u32 { self * other } } impl Mul for u64 { fn mul(self, other: u64) -> u64 { self * other } } impl Mul for i8 { fn mul(self, other: i8) -> i8 { self * other } } -impl Mul for i16 { fn mul(self, other: i16) -> i16 { self * other } } impl Mul for i32 { fn mul(self, other: i32) -> i32 { self * other } } impl Mul for i64 { fn mul(self, other: i64) -> i64 { self * other } } @@ -61,12 +55,10 @@ trait Div { impl Div for Field { fn div(self, other: Field) -> Field { self / other } } impl Div for u8 { fn div(self, other: u8) -> u8 { self / other } } -impl Div for u16 { fn div(self, other: u16) -> u16 { self / other } } impl Div for u32 { fn div(self, other: u32) -> u32 { self / other } } impl Div for u64 { fn div(self, other: u64) -> u64 { self / other } } impl Div for i8 { fn div(self, other: i8) -> i8 { self / other } } -impl Div for i16 { fn div(self, other: i16) -> i16 { self / other } } impl Div for i32 { fn div(self, other: i32) -> i32 { self / other } } impl Div for i64 { fn div(self, other: i64) -> i64 { self / other } } @@ -77,12 +69,10 @@ trait Rem{ // docs:end:rem-trait impl Rem for u8 { fn rem(self, other: u8) -> u8 { self % other } } -impl Rem for u16 { fn rem(self, other: u16) -> u16 { self % other } } impl Rem for u32 { fn rem(self, other: u32) -> u32 { self % other } } impl Rem for u64 { fn rem(self, other: u64) -> u64 { self % other } } impl Rem for i8 { fn rem(self, other: i8) -> i8 { self % other } } -impl Rem for i16 { fn rem(self, other: i16) -> i16 { self % other } } 
impl Rem for i32 { fn rem(self, other: i32) -> i32 { self % other } } impl Rem for i64 { fn rem(self, other: i64) -> i64 { self % other } } @@ -95,12 +85,10 @@ trait BitOr { impl BitOr for bool { fn bitor(self, other: bool) -> bool { self | other } } impl BitOr for u8 { fn bitor(self, other: u8) -> u8 { self | other } } -impl BitOr for u16 { fn bitor(self, other: u16) -> u16 { self | other } } impl BitOr for u32 { fn bitor(self, other: u32) -> u32 { self | other } } impl BitOr for u64 { fn bitor(self, other: u64) -> u64 { self | other } } impl BitOr for i8 { fn bitor(self, other: i8) -> i8 { self | other } } -impl BitOr for i16 { fn bitor(self, other: i16) -> i16 { self | other } } impl BitOr for i32 { fn bitor(self, other: i32) -> i32 { self | other } } impl BitOr for i64 { fn bitor(self, other: i64) -> i64 { self | other } } @@ -113,12 +101,10 @@ trait BitAnd { impl BitAnd for bool { fn bitand(self, other: bool) -> bool { self & other } } impl BitAnd for u8 { fn bitand(self, other: u8) -> u8 { self & other } } -impl BitAnd for u16 { fn bitand(self, other: u16) -> u16 { self & other } } impl BitAnd for u32 { fn bitand(self, other: u32) -> u32 { self & other } } impl BitAnd for u64 { fn bitand(self, other: u64) -> u64 { self & other } } impl BitAnd for i8 { fn bitand(self, other: i8) -> i8 { self & other } } -impl BitAnd for i16 { fn bitand(self, other: i16) -> i16 { self & other } } impl BitAnd for i32 { fn bitand(self, other: i32) -> i32 { self & other } } impl BitAnd for i64 { fn bitand(self, other: i64) -> i64 { self & other } } @@ -131,12 +117,10 @@ trait BitXor { impl BitXor for bool { fn bitxor(self, other: bool) -> bool { self ^ other } } impl BitXor for u8 { fn bitxor(self, other: u8) -> u8 { self ^ other } } -impl BitXor for u16 { fn bitxor(self, other: u16) -> u16 { self ^ other } } impl BitXor for u32 { fn bitxor(self, other: u32) -> u32 { self ^ other } } impl BitXor for u64 { fn bitxor(self, other: u64) -> u64 { self ^ other } } impl BitXor for i8 { fn bitxor(self, other: i8) -> i8 { self ^ other } } -impl BitXor for i16 { fn bitxor(self, other: i16) -> i16 { self ^ other } } impl BitXor for i32 { fn bitxor(self, other: i32) -> i32 { self ^ other } } impl BitXor for i64 { fn bitxor(self, other: i64) -> i64 { self ^ other } } @@ -147,13 +131,11 @@ trait Shl { // docs:end:shl-trait impl Shl for u8 { fn shl(self, other: u8) -> u8 { self << other } } -impl Shl for u16 { fn shl(self, other: u16) -> u16 { self << other } } impl Shl for u32 { fn shl(self, other: u32) -> u32 { self << other } } impl Shl for u64 { fn shl(self, other: u64) -> u64 { self << other } } // Bit shifting is not currently supported for signed integer types // impl Shl for i8 { fn shl(self, other: i8) -> i8 { self << other } } -// impl Shl for i16 { fn shl(self, other: i16) -> i16 { self << other } } // impl Shl for i32 { fn shl(self, other: i32) -> i32 { self << other } } // impl Shl for i64 { fn shl(self, other: i64) -> i64 { self << other } } @@ -164,12 +146,10 @@ trait Shr { // docs:end:shr-trait impl Shr for u8 { fn shr(self, other: u8) -> u8 { self >> other } } -impl Shr for u16 { fn shr(self, other: u16) -> u16 { self >> other } } impl Shr for u32 { fn shr(self, other: u32) -> u32 { self >> other } } impl Shr for u64 { fn shr(self, other: u64) -> u64 { self >> other } } // Bit shifting is not currently supported for signed integer types // impl Shr for i8 { fn shr(self, other: i8) -> i8 { self >> other } } -// impl Shr for i16 { fn shr(self, other: i16) -> i16 { self >> other } } // impl Shr for i32 { 
fn shr(self, other: i32) -> i32 { self >> other } } // impl Shr for i64 { fn shr(self, other: i64) -> i64 { self >> other } } diff --git a/noir_stdlib/src/option.nr b/noir_stdlib/src/option.nr index 137d57f33db..1c32f758af7 100644 --- a/noir_stdlib/src/option.nr +++ b/noir_stdlib/src/option.nr @@ -39,11 +39,7 @@ impl Option { /// Returns the wrapped value if `self.is_some()`. Otherwise, returns the given default value. pub fn unwrap_or(self, default: T) -> T { - if self._is_some { - self._value - } else { - default - } + if self._is_some { self._value } else { default } } /// Returns the wrapped value if `self.is_some()`. Otherwise, calls the given function to return @@ -56,6 +52,12 @@ impl Option { } } + /// Asserts `self.is_some()` with a provided custom message and returns the contained `Some` value + fn expect(self, message: fmtstr) -> T { + assert(self.is_some(), message); + self._value + } + /// If self is `Some(x)`, this returns `Some(f(x))`. Otherwise, this returns `None`. pub fn map(self, f: fn[Env](T) -> U) -> Option { if self._is_some { @@ -106,31 +108,19 @@ impl Option { /// If self is Some, return self. Otherwise, return `other`. pub fn or(self, other: Self) -> Self { - if self._is_some { - self - } else { - other - } + if self._is_some { self } else { other } } /// If self is Some, return self. Otherwise, return `default()`. pub fn or_else(self, default: fn[Env]() -> Self) -> Self { - if self._is_some { - self - } else { - default() - } + if self._is_some { self } else { default() } } // If only one of the two Options is Some, return that option. // Otherwise, if both options are Some or both are None, None is returned. pub fn xor(self, other: Self) -> Self { if self._is_some { - if other._is_some { - Option::none() - } else { - self - } + if other._is_some { Option::none() } else { self } } else if other._is_some { other } else { diff --git a/noir_stdlib/src/scalar_mul.nr b/noir_stdlib/src/scalar_mul.nr index 0e84b4f66fc..eee7aac39f2 100644 --- a/noir_stdlib/src/scalar_mul.nr +++ b/noir_stdlib/src/scalar_mul.nr @@ -6,7 +6,7 @@ struct EmbeddedCurvePoint { } impl EmbeddedCurvePoint { - fn double(self) -> EmbeddedCurvePoint { + fn double(self) -> EmbeddedCurvePoint { embedded_curve_add(self, self) } } @@ -26,11 +26,20 @@ impl Add for EmbeddedCurvePoint { #[foreign(fixed_base_scalar_mul)] // docs:start:fixed_base_embedded_curve pub fn fixed_base_embedded_curve( - _low: Field, - _high: Field + low: Field, + high: Field ) -> [Field; 2] // docs:end:fixed_base_embedded_curve {} +// This is a hack as returning an `EmbeddedCurvePoint` from a foreign function in brillig returns a [BrilligVariable::SingleAddr; 2] rather than BrilligVariable::BrilligArray +// as is defined in the brillig bytecode format. This is a workaround which allows us to fix this without modifying the serialization format. 
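The array-returning workaround described above stays internal to the module; a rough usage sketch (hypothetical program, not part of this diff) shows that callers keep going through `double` and the `Add` impl from this hunk, which delegate to the `embedded_curve_add` wrapper defined immediately below:

    use dep::std::scalar_mul::{EmbeddedCurvePoint, fixed_base_embedded_curve};

    fn main(scalar_low: Field, scalar_high: Field) {
        // Obtain a curve point from the fixed-base scalar-mul black box.
        let xy = fixed_base_embedded_curve(scalar_low, scalar_high);
        let p = EmbeddedCurvePoint { x: xy[0], y: xy[1] };

        // Adding a point to itself should agree with doubling it.
        let sum = p + p;
        let doubled = p.double();
        assert(sum.x == doubled.x);
        assert(sum.y == doubled.y);
    }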
+fn embedded_curve_add(point1: EmbeddedCurvePoint, point2: EmbeddedCurvePoint) -> EmbeddedCurvePoint { + let point_array = embedded_curve_add_array_return(point1, point2); + let x = point_array[0]; + let y = point_array[1]; + EmbeddedCurvePoint { x, y } +} + #[foreign(embedded_curve_add)] -fn embedded_curve_add(_point1: EmbeddedCurvePoint, _point2: EmbeddedCurvePoint) -> EmbeddedCurvePoint {} +fn embedded_curve_add_array_return(_point1: EmbeddedCurvePoint, _point2: EmbeddedCurvePoint) -> [Field; 2] {} diff --git a/noir_stdlib/src/schnorr.nr b/noir_stdlib/src/schnorr.nr index 025c3a0f921..757963d40d7 100644 --- a/noir_stdlib/src/schnorr.nr +++ b/noir_stdlib/src/schnorr.nr @@ -1,10 +1,10 @@ #[foreign(schnorr_verify)] // docs:start:schnorr_verify pub fn verify_signature( - _public_key_x: Field, - _public_key_y: Field, - _signature: [u8; 64], - _message: [u8; N] + public_key_x: Field, + public_key_y: Field, + signature: [u8; 64], + message: [u8; N] ) -> bool // docs:end:schnorr_verify {} diff --git a/noir_stdlib/src/sha256.nr b/noir_stdlib/src/sha256.nr index 39e39b8cb6e..2f686a64165 100644 --- a/noir_stdlib/src/sha256.nr +++ b/noir_stdlib/src/sha256.nr @@ -1,91 +1,6 @@ // Implementation of SHA-256 mapping a byte array of variable length to // 32 bytes. -// Internal functions act on 32-bit unsigned integers for simplicity. -// Auxiliary mappings; names as in FIPS PUB 180-4 -fn rotr32(a: u32, b: u32) -> u32 // 32-bit right rotation -{ - // None of the bits overlap between `(a >> b)` and `(a << (32 - b))` - // Addition is then equivalent to OR, with fewer constraints. - (a >> b) + (a << (32 - b)) -} - -fn ch(x: u32, y: u32, z: u32) -> u32 { - (x & y) ^ ((!x) & z) -} - -fn maj(x: u32, y: u32, z: u32) -> u32 { - (x & y) ^ (x & z) ^ (y & z) -} - -fn bigma0(x: u32) -> u32 { - rotr32(x, 2) ^ rotr32(x, 13) ^ rotr32(x, 22) -} -fn bigma1(x: u32) -> u32 { - rotr32(x, 6) ^ rotr32(x, 11) ^ rotr32(x, 25) -} - -fn sigma0(x: u32) -> u32 { - rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3) -} - -fn sigma1(x: u32) -> u32 { - rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10) -} - -fn sha_w(msg: [u32; 16]) -> [u32; 64] // Expanded message blocks -{ - let mut w: [u32;64] = [0; 64]; - - for j in 0..16 { - w[j] = msg[j]; - } - - for j in 16..64 { - w[j] = crate::wrapping_add( - crate::wrapping_add(sigma1(w[j-2]), w[j-7]), - crate::wrapping_add(sigma0(w[j-15]), w[j-16]), - ); - } - - w -} -// SHA-256 compression function -fn sha_c(msg: [u32; 16], hash: [u32; 8]) -> [u32; 8] { - let K: [u32; 64] = [ - 1116352408, 1899447441, 3049323471, 3921009573, 961987163, 1508970993, 2453635748, - 2870763221, 3624381080, 310598401, 607225278, 1426881987, 1925078388, 2162078206, - 2614888103, 3248222580, 3835390401, 4022224774, 264347078, 604807628, 770255983, 1249150122, - 1555081692, 1996064986, 2554220882, 2821834349, 2952996808, 3210313671, 3336571891, - 3584528711, 113926993, 338241895, 666307205, 773529912, 1294757372, 1396182291, 1695183700, - 1986661051, 2177026350, 2456956037, 2730485921, 2820302411, 3259730800, 3345764771, - 3516065817, 3600352804, 4094571909, 275423344, 430227734, 506948616, 659060556, 883997877, - 958139571, 1322822218, 1537002063, 1747873779, 1955562222, 2024104815, 2227730452, - 2361852424, 2428436474, 2756734187, 3204031479, 3329325298 - ]; // first 32 bits of fractional parts of cube roots of first 64 primes - let mut out_h: [u32; 8] = hash; - let w = sha_w(msg); - for j in 0..64 { - let t1 = crate::wrapping_add( - crate::wrapping_add( - crate::wrapping_add(out_h[7], bigma1(out_h[4])), - ch(out_h[4], out_h[5], 
out_h[6]) - ), - crate::wrapping_add(K[j], w[j]) - ); - let t2 = crate::wrapping_add(bigma0(out_h[0]), maj(out_h[0], out_h[1], out_h[2])); - out_h[7] = out_h[6]; - out_h[6] = out_h[5]; - out_h[5] = out_h[4]; - out_h[4] = crate::wrapping_add(out_h[3], t1); - out_h[3] = out_h[2]; - out_h[2] = out_h[1]; - out_h[1] = out_h[0]; - out_h[0] = crate::wrapping_add(t1, t2); - } - - out_h -} // Convert 64-byte array to array of 16 u32s fn msg_u8_to_u32(msg: [u8; 64]) -> [u32; 16] { let mut msg32: [u32; 16] = [0; 16]; @@ -102,19 +17,15 @@ fn msg_u8_to_u32(msg: [u8; 64]) -> [u32; 16] { pub fn digest(msg: [u8; N]) -> [u8; 32] { let mut msg_block: [u8; 64] = [0; 64]; let mut h: [u32; 8] = [1779033703, 3144134277, 1013904242, 2773480762, 1359893119, 2600822924, 528734635, 1541459225]; // Intermediate hash, starting with the canonical initial value - let mut c: [u32; 8] = [0; 8]; // Compression of current message block as sequence of u32 let mut out_h: [u8; 32] = [0; 32]; // Digest as sequence of bytes let mut i: u64 = 0; // Message byte pointer - for k in 0..msg.len() { + for k in 0..N { // Populate msg_block msg_block[i as Field] = msg[k]; i = i + 1; if i == 64 { // Enough to hash block - c = sha_c(msg_u8_to_u32(msg_block), h); - for j in 0..8 { - h[j] = crate::wrapping_add(c[j], h[j]); - } + h = crate::hash::sha256_compression(msg_u8_to_u32(msg_block), h); i = 0; } @@ -135,11 +46,7 @@ pub fn digest(msg: [u8; N]) -> [u8; 32] { } } } - c = h; - c = sha_c(msg_u8_to_u32(msg_block), c); - for j in 0..8 { - h[j] = crate::wrapping_add(h[j], c[j]); - } + h = crate::hash::sha256_compression(msg_u8_to_u32(msg_block), h); i = 0; } @@ -150,7 +57,7 @@ pub fn digest(msg: [u8; N]) -> [u8; 32] { msg_block[i as Field] = 0; i = i + 1; } else if i < 64 { - let mut len = 8 * msg.len() as u64; + let mut len = 8 * msg.len(); for j in 0..8 { msg_block[63 - j] = len as u8; len >>= 8; @@ -159,11 +66,8 @@ pub fn digest(msg: [u8; N]) -> [u8; 32] { } } // Hash final padded block - c = h; - c = sha_c(msg_u8_to_u32(msg_block), c); - for j in 0..8 { - h[j] = crate::wrapping_add(h[j], c[j]); - } + h = crate::hash::sha256_compression(msg_u8_to_u32(msg_block), h); + // Return final hash as byte array for j in 0..8 { for k in 0..4 { diff --git a/noir_stdlib/src/sha512.nr b/noir_stdlib/src/sha512.nr index 155ba593bba..4dfe78308e2 100644 --- a/noir_stdlib/src/sha512.nr +++ b/noir_stdlib/src/sha512.nr @@ -136,7 +136,7 @@ pub fn digest(msg: [u8; N]) -> [u8; 64] { msg_block[i as Field] = 0; i += 1; } else if i < 128 { - let mut len = 8 * msg.len() as u64; // u128 unsupported + let mut len = 8 * msg.len(); for j in 0..16 { msg_block[127 - j] = len as u8; len >>= 8; diff --git a/noir_stdlib/src/slice.nr b/noir_stdlib/src/slice.nr index a5a9a38ed53..ea8d09d14ce 100644 --- a/noir_stdlib/src/slice.nr +++ b/noir_stdlib/src/slice.nr @@ -3,34 +3,34 @@ impl [T] { /// new slice with a length one greater than the /// original unmodified slice. #[builtin(slice_push_back)] - pub fn push_back(_self: Self, _elem: T) -> Self { } + pub fn push_back(self, elem: T) -> Self {} /// Push a new element to the front of the slice, returning a /// new slice with a length one greater than the /// original unmodified slice. 
#[builtin(slice_push_front)] - pub fn push_front(_self: Self, _elem: T) -> Self { } + pub fn push_front(self, elem: T) -> Self {} /// Remove the last element of the slice, returning the /// popped slice and the element in a tuple #[builtin(slice_pop_back)] - pub fn pop_back(_self: Self) -> (Self, T) { } + pub fn pop_back(self) -> (Self, T) {} /// Remove the first element of the slice, returning the /// element and the popped slice in a tuple #[builtin(slice_pop_front)] - pub fn pop_front(_self: Self) -> (T, Self) { } + pub fn pop_front(self) -> (T, Self) {} /// Insert an element at a specified index, shifting all elements /// after it to the right #[builtin(slice_insert)] - pub fn insert(_self: Self, _index: Field, _elem: T) -> Self { } + pub fn insert(self, index: u64, elem: T) -> Self {} /// Remove an element at a specified index, shifting all elements /// after it to the left, returning the altered slice and /// the removed element #[builtin(slice_remove)] - pub fn remove(_self: Self, _index: Field) -> (Self, T) { } + pub fn remove(self, index: u64) -> (Self, T) {} // Append each element of the `other` slice to the end of `self`. // This returns a new slice and leaves both input slices unchanged. diff --git a/noir_stdlib/src/string.nr b/noir_stdlib/src/string.nr index e402abf9ab6..12b5a1e75ec 100644 --- a/noir_stdlib/src/string.nr +++ b/noir_stdlib/src/string.nr @@ -2,7 +2,7 @@ use crate::collections::vec::Vec; impl str { /// Converts the given string into a byte array #[builtin(str_as_bytes)] - pub fn as_bytes(_self: Self) -> [u8; N] { } + pub fn as_bytes(self) -> [u8; N] {} /// return a byte vector of the str content pub fn as_bytes_vec(self: Self) -> Vec { diff --git a/noir_stdlib/src/test.nr b/noir_stdlib/src/test.nr index 47b31f4acea..e1c320215de 100644 --- a/noir_stdlib/src/test.nr +++ b/noir_stdlib/src/test.nr @@ -1,17 +1,17 @@ #[oracle(create_mock)] -unconstrained fn create_mock_oracle(_name: str) -> Field {} +unconstrained fn create_mock_oracle(name: str) -> Field {} #[oracle(set_mock_params)] -unconstrained fn set_mock_params_oracle
<P>
(_id: Field, _params: P) {} +unconstrained fn set_mock_params_oracle
<P>
(id: Field, params: P) {} #[oracle(set_mock_returns)] -unconstrained fn set_mock_returns_oracle(_id: Field, _returns: R) {} +unconstrained fn set_mock_returns_oracle(id: Field, returns: R) {} #[oracle(set_mock_times)] -unconstrained fn set_mock_times_oracle(_id: Field, _times: u64) {} +unconstrained fn set_mock_times_oracle(id: Field, times: u64) {} #[oracle(clear_mock)] -unconstrained fn clear_mock_oracle(_id: Field) {} +unconstrained fn clear_mock_oracle(id: Field) {} struct OracleMock { id: Field, @@ -19,9 +19,7 @@ struct OracleMock { impl OracleMock { unconstrained pub fn mock(name: str) -> Self { - Self { - id: create_mock_oracle(name), - } + Self { id: create_mock_oracle(name) } } unconstrained pub fn with_params
<P>
(self, params: P) -> Self { diff --git a/noir_stdlib/src/uint128.nr b/noir_stdlib/src/uint128.nr index c8c6217de90..b91ed5c4cb2 100644 --- a/noir_stdlib/src/uint128.nr +++ b/noir_stdlib/src/uint128.nr @@ -13,14 +13,11 @@ impl U128 { pub fn from_u64s_le(lo: u64, hi: u64) -> U128 { // in order to handle multiplication, we need to represent the product of two u64 without overflow assert(crate::field::modulus_num_bits() as u32 > 128); - U128 { - lo: lo as Field, - hi: hi as Field, - } + U128 { lo: lo as Field, hi: hi as Field } } pub fn from_u64s_be(hi: u64, lo: u64) -> U128 { - U128::from_u64s_le(lo,hi) + U128::from_u64s_le(lo, hi) } pub fn from_le_bytes(bytes: [u8; 16]) -> U128 { @@ -36,16 +33,13 @@ impl U128 { hi += (bytes[i] as Field)*base; base *= 256; } - U128 { - lo, - hi, - } + U128 { lo, hi } } pub fn to_be_bytes(self: Self) -> [u8; 16] { let lo = self.lo.to_be_bytes(8); let hi = self.hi.to_be_bytes(8); - let mut bytes = [0;16]; + let mut bytes = [0; 16]; for i in 0..8 { bytes[i] = hi[i]; bytes[i+8] = lo[i]; @@ -56,7 +50,7 @@ impl U128 { pub fn to_le_bytes(self: Self) -> [u8; 16] { let lo = self.lo.to_le_bytes(8); let hi = self.hi.to_le_bytes(8); - let mut bytes = [0;16]; + let mut bytes = [0; 16]; for i in 0..8 { bytes[i] = lo[i]; bytes[i+8] = hi[i]; @@ -73,9 +67,9 @@ impl U128 { let mut lo = 0; let mut hi = 0; - let mut base = 1; + let mut base = 1; if N <= 18 { - for i in 0..N-2 { + for i in 0..N - 2 { lo += U128::decode_ascii(bytes[N-i-1])*base; base = base*16; } @@ -85,27 +79,21 @@ impl U128 { base = base*16; } base = 1; - for i in 17..N-1 { + for i in 17..N - 1 { hi += U128::decode_ascii(bytes[N-i])*base; base = base*16; } } - U128 { - lo: lo as Field, - hi: hi as Field, - } + U128 { lo: lo as Field, hi: hi as Field } } fn decode_ascii(ascii: u8) -> Field { if ascii < 58 { ascii - 48 + } else if ascii < 71 { + ascii - 55 } else { - if ascii < 71 { - ascii - 55 - } else { - ascii - 87 - } - + ascii - 87 } as Field } @@ -114,15 +102,14 @@ impl U128 { (U128::from_u64s_le(0, 0), self) } else { //TODO check if this can overflow? 
- let (q,r) = self.unconstrained_div(b * U128::from_u64s_le(2,0)); - let q_mul_2 = q * U128::from_u64s_le(2,0); + let (q,r) = self.unconstrained_div(b * U128::from_u64s_le(2, 0)); + let q_mul_2 = q * U128::from_u64s_le(2, 0); if r < b { (q_mul_2, r) } else { - (q_mul_2 + U128::from_u64s_le(1,0), r - b) + (q_mul_2 + U128::from_u64s_le(1, 0), r - b) } - - } + } } pub fn from_integer(i: T) -> U128 { @@ -130,31 +117,25 @@ impl U128 { // Reject values which would overflow a u128 f.assert_max_bit_size(128); let lo = f as u64 as Field; - let hi = (f-lo) / pow64; - U128 { - lo, - hi, - } + let hi = (f - lo) / pow64; + U128 { lo, hi } } pub fn to_integer(self) -> T { - crate::from_field(self.lo+self.hi*pow64) + crate::from_field(self.lo + self.hi * pow64) } fn wrapping_mul(self: Self, b: U128) -> U128 { - let low = self.lo*b.lo; + let low = self.lo * b.lo; let lo = low as u64 as Field; let carry = (low - lo) / pow64; let high = if crate::field::modulus_num_bits() as u32 > 196 { - (self.lo+self.hi)*(b.lo+b.hi) - low + carry + (self.lo + self.hi) * (b.lo + b.hi) - low + carry } else { - self.lo*b.hi + self.hi*b.lo + carry + self.lo * b.hi + self.hi * b.lo + carry }; let hi = high as u64 as Field; - U128 { - lo, - hi, - } + U128 { lo, hi } } } @@ -180,7 +161,7 @@ impl Sub for U128 { let borrow = (low == lo) as Field; let high = self.hi - b.hi - borrow; let hi = high as u64 as Field; - assert(hi == high, "attempt to subtract with overflow"); + assert(hi == high, "attempt to subtract with underflow"); U128 { lo, hi, diff --git a/noirc_macros/Cargo.toml b/noirc_macros/Cargo.toml new file mode 100644 index 00000000000..699e6b01cae --- /dev/null +++ b/noirc_macros/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "noirc_macros" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +noirc_frontend.workspace = true +iter-extended.workspace = true \ No newline at end of file diff --git a/noirc_macros/src/lib.rs b/noirc_macros/src/lib.rs new file mode 100644 index 00000000000..9a916843200 --- /dev/null +++ b/noirc_macros/src/lib.rs @@ -0,0 +1,73 @@ +use noirc_frontend::hir::def_collector::dc_crate::UnresolvedFunctions; +use noirc_frontend::hir::def_collector::dc_crate::UnresolvedTraitImpl; +use noirc_frontend::macros_api::parse_program; +use noirc_frontend::macros_api::HirContext; +use noirc_frontend::macros_api::SortedModule; +use noirc_frontend::macros_api::{CrateId, FileId}; +use noirc_frontend::macros_api::{MacroError, MacroProcessor}; + +pub struct AssertMessageMacro; + +impl MacroProcessor for AssertMessageMacro { + fn process_untyped_ast( + &self, + ast: SortedModule, + crate_id: &CrateId, + _context: &HirContext, + ) -> Result { + transform(ast, crate_id) + } + + fn process_unresolved_traits_impls( + &self, + _crate_id: &CrateId, + _context: &mut HirContext, + _unresolved_traits_impls: &[UnresolvedTraitImpl], + _collected_functions: &mut Vec, + ) -> Result<(), (MacroError, FileId)> { + Ok(()) + } + + // This macro does not need to process any information after name resolution + fn process_typed_ast( + &self, + _crate_id: &CrateId, + _context: &mut HirContext, + ) -> Result<(), (MacroError, FileId)> { + Ok(()) + } +} + +fn transform(ast: SortedModule, crate_id: &CrateId) -> Result { + let ast = add_resolve_assert_message_funcs(ast, crate_id)?; + + Ok(ast) +} + +fn 
add_resolve_assert_message_funcs( + mut ast: SortedModule, + crate_id: &CrateId, +) -> Result { + if !crate_id.is_stdlib() { + return Ok(ast); + } + let assert_message_oracles = " + #[oracle(assert_message)] + unconstrained fn assert_message_oracle(_input: T) {} + unconstrained pub fn resolve_assert_message(input: T, condition: bool) { + if !condition { + assert_message_oracle(input); + } + }"; + + let (assert_msg_funcs_ast, errors) = parse_program(assert_message_oracles); + assert_eq!(errors.len(), 0, "Failed to parse Noir macro code. This is either a bug in the compiler or the Noir macro code"); + + let assert_msg_funcs_ast = assert_msg_funcs_ast.into_sorted(); + + for func in assert_msg_funcs_ast.functions { + ast.functions.push(func) + } + + Ok(ast) +} diff --git a/package.json b/package.json index e70189b5522..4987602c709 100644 --- a/package.json +++ b/package.json @@ -28,7 +28,7 @@ "build:types": "yarn workspace @noir-lang/types run build", "build:backend_barretenberg": "yarn workspace @noir-lang/backend_barretenberg run build", "build:noir_js": "yarn workspace @noir-lang/noir_js run build", - "build:js:only": "yarn build:types && yarn build:backend_barretenberg && yarn build:noir_js", + "build:js:only": "yarn workspaces foreach -vtp --from \"{@noir-lang/types,@noir-lang/backend_barretenberg,@noir-lang/noir_js,@noir-lang/noir_codegen}\" run build", "prepare:publish": "yarn clean && yarn install:from:nix && yarn build:js:only", "nightly:version": "yarn workspaces foreach run nightly:version", "publish:all": "yarn install && yarn workspaces foreach run publish" @@ -38,7 +38,7 @@ "@typescript-eslint/parser": "^6.7.3", "chai": "^4.3.7", "cspell": "^8.3.2", - "eslint": "^8.50.0", + "eslint": "^8.56.0", "eslint-plugin-prettier": "^5.0.0", "mocha": "^10.2.0", "prettier": "3.0.3", diff --git a/rust-toolchain.toml b/rust-toolchain.toml index b6f7edc4bde..0e5ac891ce9 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.71.1" +channel = "1.73.0" components = [ "rust-src" ] targets = [ "wasm32-unknown-unknown", "wasm32-wasi", "aarch64-apple-darwin" ] profile = "default" diff --git a/scripts/bootstrap_native.sh b/scripts/bootstrap_native.sh index 3e0e2ed853a..974f0edcfec 100755 --- a/scripts/bootstrap_native.sh +++ b/scripts/bootstrap_native.sh @@ -12,6 +12,12 @@ else export GIT_COMMIT=$(git rev-parse --verify HEAD) fi +# Check if the 'cargo' command is available in the system +if ! command -v cargo > /dev/null; then + echo "Cargo is not installed. Please install Cargo and the Rust toolchain." + exit 1 +fi + # Build native. if [ -n "${DEBUG:-}" ]; then cargo build diff --git a/scripts/bootstrap_packages.sh b/scripts/bootstrap_packages.sh index 18c34b9cfb7..47ffe12beec 100755 --- a/scripts/bootstrap_packages.sh +++ b/scripts/bootstrap_packages.sh @@ -3,7 +3,7 @@ set -eu cd $(dirname "$0")/.. -./scripts/install_wasm-bindgen.sh +./.github/scripts/wasm-bindgen-install.sh # If this project has been subrepod into another project, set build data manually. export SOURCE_DATE_EPOCH=$(date +%s) diff --git a/scripts/install_wasm-bindgen.sh b/scripts/install_wasm-bindgen.sh deleted file mode 100755 index f34ed4c0ad0..00000000000 --- a/scripts/install_wasm-bindgen.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -set -eu - -cd $(dirname "$0")/.. - -# Install binstall -curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash - -# Install wasm-bindgen-cli. 
-if [ "$(wasm-bindgen --version | cut -d' ' -f2)" != "0.2.86" ]; then - echo "Building wasm-bindgen..." - cargo binstall wasm-bindgen-cli@0.2.86 --force --no-confirm -fi - diff --git a/scripts/nargo_compile_noir_js_assert_lt.sh b/scripts/nargo_compile_noir_js_assert_lt.sh deleted file mode 100755 index 636ae59b996..00000000000 --- a/scripts/nargo_compile_noir_js_assert_lt.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -cd ./tooling/noir_js/test/noir_compiled_examples/assert_lt -nargo compile \ No newline at end of file diff --git a/scripts/nargo_compile_wasm_fixtures.sh b/scripts/nargo_compile_wasm_fixtures.sh deleted file mode 100755 index 95bb698c8a2..00000000000 --- a/scripts/nargo_compile_wasm_fixtures.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -fixtures_dir="./compiler/wasm/test/fixtures" - -nargo compile --program-dir=$fixtures_dir/noir-contract -nargo compile --program-dir=$fixtures_dir/simple -nargo compile --program-dir=$fixtures_dir/with-deps diff --git a/scripts/test_js_packages.sh b/scripts/test_js_packages.sh index cf4fd81326d..e1e10c543e0 100755 --- a/scripts/test_js_packages.sh +++ b/scripts/test_js_packages.sh @@ -3,7 +3,7 @@ set -eu cd $(dirname "$0")/.. -./scripts/install_wasm-bindgen.sh +./.github/scripts/wasm-bindgen-install.sh # If this project has been subrepod into another project, set build data manually. export SOURCE_DATE_EPOCH=$(date +%s) @@ -19,8 +19,7 @@ export PATH="${PATH}:/usr/src/noir/target/release/" yarn --immutable yarn build -npx playwright install -npx playwright install-deps +./.github/scripts/playwright-install.sh ./scripts/test.sh yarn test diff --git a/scripts/test_native.sh b/scripts/test_native.sh index bc1c47ecf12..9b9aa0ce4d7 100755 --- a/scripts/test_native.sh +++ b/scripts/test_native.sh @@ -12,4 +12,6 @@ else export GIT_COMMIT=$(git rev-parse --verify HEAD) fi -cargo test --workspace --locked --release \ No newline at end of file +cargo fmt --all --check +cargo clippy --workspace --locked --release +cargo test --workspace --locked --release diff --git a/test_programs/.gitignore b/test_programs/.gitignore index a229df6197f..6da0100814a 100644 --- a/test_programs/.gitignore +++ b/test_programs/.gitignore @@ -1,2 +1,3 @@ acir_artifacts -execution_success/**/crs \ No newline at end of file +execution_success/**/crs +./Nargo.toml diff --git a/test_programs/compile_failure/assert_msg_runtime/Nargo.toml b/test_programs/compile_failure/assert_msg_runtime/Nargo.toml new file mode 100644 index 00000000000..765f632ff74 --- /dev/null +++ b/test_programs/compile_failure/assert_msg_runtime/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "assert_msg_runtime" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/compile_failure/assert_msg_runtime/Prover.toml b/test_programs/compile_failure/assert_msg_runtime/Prover.toml new file mode 100644 index 00000000000..f28f2f8cc48 --- /dev/null +++ b/test_programs/compile_failure/assert_msg_runtime/Prover.toml @@ -0,0 +1,2 @@ +x = "5" +y = "10" diff --git a/test_programs/compile_failure/assert_msg_runtime/src/main.nr b/test_programs/compile_failure/assert_msg_runtime/src/main.nr new file mode 100644 index 00000000000..bec3082550a --- /dev/null +++ b/test_programs/compile_failure/assert_msg_runtime/src/main.nr @@ -0,0 +1,7 @@ +fn main(x: Field, y: pub Field) { + assert(x != y, f"Expected x != y, but got both equal {x}"); + assert(x != y); + let z = x + y; + assert(z != y, f"Expected z != y, but got both equal {z}"); + assert_eq(x, y, 
f"Expected x == y, but x is {x} and y is {y}"); +} \ No newline at end of file diff --git a/test_programs/compile_failure/brillig_assert_msg_runtime/Nargo.toml b/test_programs/compile_failure/brillig_assert_msg_runtime/Nargo.toml new file mode 100644 index 00000000000..00f97b7273a --- /dev/null +++ b/test_programs/compile_failure/brillig_assert_msg_runtime/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "brillig_assert_msg_runtime" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/compile_failure/brillig_assert_msg_runtime/Prover.toml b/test_programs/compile_failure/brillig_assert_msg_runtime/Prover.toml new file mode 100644 index 00000000000..0e5dfd5638d --- /dev/null +++ b/test_programs/compile_failure/brillig_assert_msg_runtime/Prover.toml @@ -0,0 +1 @@ +x = "5" diff --git a/test_programs/compile_failure/brillig_assert_msg_runtime/src/main.nr b/test_programs/compile_failure/brillig_assert_msg_runtime/src/main.nr new file mode 100644 index 00000000000..428b2006363 --- /dev/null +++ b/test_programs/compile_failure/brillig_assert_msg_runtime/src/main.nr @@ -0,0 +1,10 @@ +fn main(x: Field) { + assert(1 == conditional(x)); +} + +unconstrained fn conditional(x: Field) -> Field { + let z = x as u8 + 20; + assert_eq(z, 25, f"Expected 25 but got {z}"); + assert(x == 10, f"Expected x to equal 10, but got {x}"); + 1 +} \ No newline at end of file diff --git a/test_programs/compile_failure/brillig_mut_ref_from_acir/Nargo.toml b/test_programs/compile_failure/brillig_mut_ref_from_acir/Nargo.toml new file mode 100644 index 00000000000..a20ee09714c --- /dev/null +++ b/test_programs/compile_failure/brillig_mut_ref_from_acir/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "brillig_mut_ref_from_acir" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/compile_failure/brillig_mut_ref_from_acir/src/main.nr b/test_programs/compile_failure/brillig_mut_ref_from_acir/src/main.nr new file mode 100644 index 00000000000..cf3279cac0d --- /dev/null +++ b/test_programs/compile_failure/brillig_mut_ref_from_acir/src/main.nr @@ -0,0 +1,8 @@ +unconstrained fn mut_ref_identity(value: &mut Field) -> Field { + *value +} + +fn main(mut x: Field, y: pub Field) { + let returned_x = mut_ref_identity(&mut x); + assert(returned_x == x); +} \ No newline at end of file diff --git a/test_programs/compile_failure/brillig_slice_to_acir/Nargo.toml b/test_programs/compile_failure/brillig_slice_to_acir/Nargo.toml new file mode 100644 index 00000000000..c3e51561cc7 --- /dev/null +++ b/test_programs/compile_failure/brillig_slice_to_acir/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "brillig_slice_to_acir" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/compile_failure/brillig_slice_to_acir/src/main.nr b/test_programs/compile_failure/brillig_slice_to_acir/src/main.nr new file mode 100644 index 00000000000..dcf23aac5f5 --- /dev/null +++ b/test_programs/compile_failure/brillig_slice_to_acir/src/main.nr @@ -0,0 +1,14 @@ +global DEPTH: Field = 40000; + +fn main(x: [u32; DEPTH], y: u32) { + let mut new_x = []; + new_x = clear(x, y); +} + +unconstrained fn clear(x: [u32; DEPTH], y: u32) -> [u32] { + let mut a = []; + for i in 0..y { + a = a.push_back(x[i]); + } + a +} diff --git a/test_programs/compile_failure/brillig_vec_to_acir/Nargo.toml b/test_programs/compile_failure/brillig_vec_to_acir/Nargo.toml 
new file mode 100644 index 00000000000..c09fc417b55 --- /dev/null +++ b/test_programs/compile_failure/brillig_vec_to_acir/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "brillig_vec_to_acir" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/compile_failure/brillig_vec_to_acir/src/main.nr b/test_programs/compile_failure/brillig_vec_to_acir/src/main.nr new file mode 100644 index 00000000000..8f872f1b903 --- /dev/null +++ b/test_programs/compile_failure/brillig_vec_to_acir/src/main.nr @@ -0,0 +1,14 @@ +global DEPTH: Field = 40000; + +fn main(x: [u32; DEPTH], y: u32) { + let mut new_x = Vec::new(); + new_x = clear(x, y); +} + +unconstrained fn clear(x: [u32; DEPTH], y: u32) -> Vec { + let mut a = Vec::new(); + for i in 0..y { + a.push(x[i]); + } + a +} diff --git a/test_programs/compile_failure/builtin_function_declaration/Nargo.toml b/test_programs/compile_failure/builtin_function_declaration/Nargo.toml new file mode 100644 index 00000000000..3835292a6ba --- /dev/null +++ b/test_programs/compile_failure/builtin_function_declaration/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "builtin_function_declaration" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] diff --git a/test_programs/compile_failure/builtin_function_declaration/src/main.nr b/test_programs/compile_failure/builtin_function_declaration/src/main.nr new file mode 100644 index 00000000000..ed376557371 --- /dev/null +++ b/test_programs/compile_failure/builtin_function_declaration/src/main.nr @@ -0,0 +1,10 @@ +// This test prevents users from trying to create their own builtin functions as these should only exist in the stdlib. + +// This would otherwise be a perfectly valid declaration of the `to_le_bits` builtin function +#[builtin(to_le_bits)] +fn to_le_bits(_x: Field, _bit_size: u32) -> [u1] {} + +fn main(x: Field) -> pub u1 { + let bits = to_le_bits(x, 100); + bits[0] +} diff --git a/test_programs/compile_failure/foreign_function_declaration/Nargo.toml b/test_programs/compile_failure/foreign_function_declaration/Nargo.toml new file mode 100644 index 00000000000..951658d7fb8 --- /dev/null +++ b/test_programs/compile_failure/foreign_function_declaration/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "foreign_function_declaration" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] diff --git a/test_programs/compile_failure/foreign_function_declaration/src/main.nr b/test_programs/compile_failure/foreign_function_declaration/src/main.nr new file mode 100644 index 00000000000..6273067f6a7 --- /dev/null +++ b/test_programs/compile_failure/foreign_function_declaration/src/main.nr @@ -0,0 +1,10 @@ +// This test prevents users from trying to create their own blackbox functions as these should only exist in the stdlib. + +// This would otherwise be a perfectly valid definition of the `pedersen_hash` black box function, +// however executing the circuit results in an unhelpful ICE. 
+#[foreign(pedersen_hash)] +fn my_pedersen_hash<N>(_input: [Field; N]) -> Field {} + +fn main() -> pub Field { + my_pedersen_hash([1]) +} diff --git a/test_programs/compile_failure/hashmap_load_factor/Nargo.toml b/test_programs/compile_failure/hashmap_load_factor/Nargo.toml new file mode 100644 index 00000000000..92da5a357f4 --- /dev/null +++ b/test_programs/compile_failure/hashmap_load_factor/Nargo.toml @@ -0,0 +1,6 @@ +[package] +name = "hashmap_load_factor" +type = "bin" +authors = [""] + +[dependencies] \ No newline at end of file diff --git a/test_programs/compile_failure/hashmap_load_factor/Prover.toml b/test_programs/compile_failure/hashmap_load_factor/Prover.toml new file mode 100644 index 00000000000..e54319c61e9 --- /dev/null +++ b/test_programs/compile_failure/hashmap_load_factor/Prover.toml @@ -0,0 +1,26 @@ +# Expected 6 key-value entries for hashmap capacity of 8. +# These must be distinct (both key-to-key, and value-to-value) for correct testing. + +[[input]] +key = 2 +value = 17 + +[[input]] +key = 3 +value = 19 + +[[input]] +key = 5 +value = 23 + +[[input]] +key = 7 +value = 29 + +[[input]] +key = 11 +value = 31 + +[[input]] +key = 41 +value = 43 \ No newline at end of file diff --git a/test_programs/compile_failure/hashmap_load_factor/src/main.nr b/test_programs/compile_failure/hashmap_load_factor/src/main.nr new file mode 100644 index 00000000000..ade43f898e1 --- /dev/null +++ b/test_programs/compile_failure/hashmap_load_factor/src/main.nr @@ -0,0 +1,35 @@ +use dep::std::collections::map::HashMap; +use dep::std::hash::BuildHasherDefault; +use dep::std::hash::pedersen::PedersenHasher; + +struct Entry{ + key: Field, + value: Field +} + +global HASHMAP_CAP = 8; +global HASHMAP_LEN = 6; + +fn allocate_hashmap() -> HashMap<Field, Field, HASHMAP_CAP, BuildHasherDefault<PedersenHasher>> { + HashMap::default() +} + +fn main(input: [Entry; HASHMAP_LEN]) { + test_load_factor(input); +} + +// In this test we exceed the load factor: +// α_max = 0.75, thus for a capacity of 8 and a length of 6, +// insertion of a new unique key (the 7th) should throw an assertion error. +fn test_load_factor(input: [Entry; HASHMAP_LEN]) { + let mut hashmap = allocate_hashmap(); + + for entry in input { + hashmap.insert(entry.key, entry.value); + } + + // We use prime numbers for testing, + // therefore it is guaranteed that doubling a key gives a unique value.
+ let key = input[0].key * 2; + hashmap.insert(key, input[0].value); +} diff --git a/test_programs/compile_failure/integer_literal_overflow/src/main.nr b/test_programs/compile_failure/integer_literal_overflow/src/main.nr index d89505c0085..e4d21b5c3b9 100644 --- a/test_programs/compile_failure/integer_literal_overflow/src/main.nr +++ b/test_programs/compile_failure/integer_literal_overflow/src/main.nr @@ -2,4 +2,4 @@ fn main() { foo(1234) } -fn foo(_x: u4) {} +fn foo(_x: u8) {} diff --git a/test_programs/compile_failure/option_expect/Nargo.toml b/test_programs/compile_failure/option_expect/Nargo.toml new file mode 100644 index 00000000000..1ee1215ff71 --- /dev/null +++ b/test_programs/compile_failure/option_expect/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "option_expect" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/compile_failure/option_expect/src/main.nr b/test_programs/compile_failure/option_expect/src/main.nr new file mode 100644 index 00000000000..439ce4f386e --- /dev/null +++ b/test_programs/compile_failure/option_expect/src/main.nr @@ -0,0 +1,8 @@ +fn main() { + let inner_value = 3; + let none = Option::none(); + let some = Option::some(inner_value); + + assert(some.expect(f"Should have the value {inner_value}") == 3); + assert(none.expect(f"Should have the value {inner_value}") == 3); +} diff --git a/test_programs/compile_failure/option_expect_bad_input/Nargo.toml b/test_programs/compile_failure/option_expect_bad_input/Nargo.toml new file mode 100644 index 00000000000..0555681e188 --- /dev/null +++ b/test_programs/compile_failure/option_expect_bad_input/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "option_expect_bad_input" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/compile_failure/option_expect_bad_input/src/main.nr b/test_programs/compile_failure/option_expect_bad_input/src/main.nr new file mode 100644 index 00000000000..cc93e767975 --- /dev/null +++ b/test_programs/compile_failure/option_expect_bad_input/src/main.nr @@ -0,0 +1,6 @@ +fn main() { + let inner_value = 3; + let some = Option::some(inner_value); + + assert(some.expect("Should have the value {inner_value}") == 3); +} diff --git a/test_programs/compile_failure/restricted_bit_sizes/Nargo.toml b/test_programs/compile_failure/restricted_bit_sizes/Nargo.toml new file mode 100644 index 00000000000..36f8253e8e7 --- /dev/null +++ b/test_programs/compile_failure/restricted_bit_sizes/Nargo.toml @@ -0,0 +1,5 @@ +[package] +name = "restricted_bit_sizes" +type = "bin" +authors = [""] +[dependencies] diff --git a/test_programs/compile_failure/restricted_bit_sizes/src/main.nr b/test_programs/compile_failure/restricted_bit_sizes/src/main.nr new file mode 100644 index 00000000000..01e72bfcfd7 --- /dev/null +++ b/test_programs/compile_failure/restricted_bit_sizes/src/main.nr @@ -0,0 +1,5 @@ +use dep::std::assert_constant; + +fn main() -> pub u63 { + 5 +} diff --git a/test_programs/compile_success_empty/brillig_cast/src/main.nr b/test_programs/compile_success_empty/brillig_cast/src/main.nr index 3ba29b52982..ecb832468ba 100644 --- a/test_programs/compile_success_empty/brillig_cast/src/main.nr +++ b/test_programs/compile_success_empty/brillig_cast/src/main.nr @@ -17,33 +17,25 @@ unconstrained fn bool_casts() { unconstrained fn field_casts() { assert(5 as u8 as Field == 5); - assert(16 as u4 as Field == 0); + assert(256 as u8 as Field == 0); } unconstrained 
fn uint_casts() { - let x: u32 = 100; - assert(x as u2 == 0); - assert(x as u4 == 4); - assert(x as u6 == 36); - assert(x as u8 == 100); - assert(x as u64 == 100); - assert(x as u126 == 100); + let x: u32 = 300; + assert(x as u8 == 44); + assert(x as u32 == 300); + assert(x as u64 == 300); } unconstrained fn int_casts() { - let x: i32 = 100; - assert(x as i2 == 0); - assert(x as i4 == 4); - assert(x as i6 == -28 as i6); - assert(x as i8 == 100); - assert(x as i8 == 100); - assert(x as i8 == 100); + let x: i32 = 456; + assert(x as i8 == -56 as i8); + assert(x as i64 == 456); } unconstrained fn mixed_casts() { assert(100 as u32 as i32 as u32 == 100); - assert(13 as u4 as i2 as u32 == 1); - assert(15 as u4 as i2 as u32 == 3); + assert(257 as u8 as u32 == 1); assert(1 as u8 as bool == true); assert(true as i8 == 1); } diff --git a/test_programs/compile_success_empty/brillig_modulo/src/main.nr b/test_programs/compile_success_empty/brillig_modulo/src/main.nr index ed0353b101a..195ed31fb08 100644 --- a/test_programs/compile_success_empty/brillig_modulo/src/main.nr +++ b/test_programs/compile_success_empty/brillig_modulo/src/main.nr @@ -7,9 +7,9 @@ fn main() { assert(signed_modulo(5, 3) == 2); assert(signed_modulo(2, 3) == 2); - let minus_two: i4 = -2; // 14 - let minus_three: i4 = -3; // 13 - let minus_five: i4 = -5; // 11 + let minus_two: i8 = -2; // 254 + let minus_three: i8 = -3; // 253 + let minus_five: i8 = -5; // 251 // (5 / -3) * -3 + 2 = -1 * -3 + 2 = 3 + 2 = 5 assert(signed_modulo(5, minus_three) == 2); // (-5 / 3) * 3 - 2 = -1 * 3 - 2 = -3 - 2 = -5 @@ -22,6 +22,6 @@ unconstrained fn modulo(x: u32, y: u32) -> u32 { x % y } -unconstrained fn signed_modulo(x: i4, y: i4) -> i4 { +unconstrained fn signed_modulo(x: i8, y: i8) -> i8 { x % y } diff --git a/test_programs/compile_success_empty/closure_explicit_types/src/main.nr b/test_programs/compile_success_empty/closure_explicit_types/src/main.nr index eec2b90b5b2..b6c8a6b7b3c 100644 --- a/test_programs/compile_success_empty/closure_explicit_types/src/main.nr +++ b/test_programs/compile_success_empty/closure_explicit_types/src/main.nr @@ -7,13 +7,13 @@ fn ret_closure1() -> fn[(Field,)]() -> Field { || x + 10 } // return lamda that captures two things -fn ret_closure2() -> fn[(Field,Field)]() -> Field { +fn ret_closure2() -> fn[(Field, Field)]() -> Field { let x = 20; let y = 10; || x + y + 10 } // return lamda that captures two things with different types -fn ret_closure3() -> fn[(u32,u64)]() -> u64 { +fn ret_closure3() -> fn[(u32, u64)]() -> u64 { let x: u32 = 20; let y: u64 = 10; || x as u64 + y + 10 diff --git a/test_programs/compile_success_empty/comptime_sort/src/main.nr b/test_programs/compile_success_empty/comptime_sort/src/main.nr deleted file mode 100644 index a24a6ebaba6..00000000000 --- a/test_programs/compile_success_empty/comptime_sort/src/main.nr +++ /dev/null @@ -1,7 +0,0 @@ -fn main() { - let unsorted: [u8; 3] = [3, 1, 2]; - let sorted = unsorted.sort(); - assert(sorted[0] == 1); - assert(sorted[1] == 2); - assert(sorted[2] == 3); -} diff --git a/test_programs/compile_success_empty/conditional_regression_579/src/main.nr b/test_programs/compile_success_empty/conditional_regression_579/src/main.nr index a479a7a6fbf..a517f4fdb70 100644 --- a/test_programs/compile_success_empty/conditional_regression_579/src/main.nr +++ b/test_programs/compile_success_empty/conditional_regression_579/src/main.nr @@ -12,9 +12,7 @@ struct MyStruct579 { impl MyStruct579 { fn new(array_param: [u32; 2]) -> MyStruct579 { - MyStruct579 { - array_param: 
array_param - } + MyStruct579 { array_param } } } diff --git a/test_programs/compile_success_empty/literal_not_simplification/Nargo.toml b/test_programs/compile_success_empty/literal_not_simplification/Nargo.toml new file mode 100644 index 00000000000..63d73ed3c0a --- /dev/null +++ b/test_programs/compile_success_empty/literal_not_simplification/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "literal_not_simplification" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] diff --git a/test_programs/compile_success_empty/literal_not_simplification/src/main.nr b/test_programs/compile_success_empty/literal_not_simplification/src/main.nr new file mode 100644 index 00000000000..33198a326c9 --- /dev/null +++ b/test_programs/compile_success_empty/literal_not_simplification/src/main.nr @@ -0,0 +1,8 @@ +fn main() { + let four: u8 = 4; + let not_four: u8 = !four; + + let five: u8 = 5; + let not_five: u8 = !five; + assert(not_four != not_five); +} diff --git a/test_programs/compile_success_empty/method_call_regression/Nargo.toml b/test_programs/compile_success_empty/method_call_regression/Nargo.toml index 92c9b942008..09f95590aad 100644 --- a/test_programs/compile_success_empty/method_call_regression/Nargo.toml +++ b/test_programs/compile_success_empty/method_call_regression/Nargo.toml @@ -1,5 +1,5 @@ [package] -name = "short" +name = "method_call_regression" type = "bin" authors = [""] compiler_version = ">=0.19.4" diff --git a/test_programs/compile_success_empty/option/src/main.nr b/test_programs/compile_success_empty/option/src/main.nr index 1f879bd375f..989c8f65bf4 100644 --- a/test_programs/compile_success_empty/option/src/main.nr +++ b/test_programs/compile_success_empty/option/src/main.nr @@ -1,5 +1,3 @@ -use dep::std::option::Option; - fn main() { let ten = 10; // giving this a name, to ensure that the Option functions work with closures let none = Option::none(); @@ -22,6 +20,8 @@ fn main() { assert(some.map(|x| x * 2).unwrap() == 6); assert(some.map(|x| x * ten).unwrap() == 30); + assert(some.expect(f"Should have a value") == 3); + assert(none.map_or(0, |x| x * 2) == 0); assert(some.map_or(0, |x| x * 2) == 6); assert(none.map_or(0, |x| x * ten) == 0); diff --git a/test_programs/compile_success_empty/reexports/src/main.nr b/test_programs/compile_success_empty/reexports/src/main.nr index bb94b21b221..ed469ff77d0 100644 --- a/test_programs/compile_success_empty/reexports/src/main.nr +++ b/test_programs/compile_success_empty/reexports/src/main.nr @@ -1,8 +1,6 @@ use dep::reexporting_lib::{FooStruct, MyStruct, lib}; fn main() { - let x: FooStruct = MyStruct { - inner: 0 - }; + let x: FooStruct = MyStruct { inner: 0 }; assert(lib::is_struct_zero(x)); } diff --git a/test_programs/compile_success_empty/specialization/src/main.nr b/test_programs/compile_success_empty/specialization/src/main.nr index 9cd32e0f1eb..30116330a86 100644 --- a/test_programs/compile_success_empty/specialization/src/main.nr +++ b/test_programs/compile_success_empty/specialization/src/main.nr @@ -1,11 +1,15 @@ struct Foo {} impl Foo { - fn foo(_self: Self) -> Field { 1 } + fn foo(_self: Self) -> Field { + 1 + } } impl Foo { - fn foo(_self: Self) -> Field { 2 } + fn foo(_self: Self) -> Field { + 2 + } } fn main() { diff --git a/test_programs/compile_success_empty/trait_static_methods/Nargo.toml b/test_programs/compile_success_empty/trait_static_methods/Nargo.toml index 71c541ccd4f..ea30031b9a5 100644 --- a/test_programs/compile_success_empty/trait_static_methods/Nargo.toml +++ 
b/test_programs/compile_success_empty/trait_static_methods/Nargo.toml @@ -1,5 +1,5 @@ [package] -name = "trait_self" +name = "trait_static_methods" type = "bin" authors = [""] diff --git a/test_programs/execution_success/1327_concrete_in_generic/src/main.nr b/test_programs/execution_success/1327_concrete_in_generic/src/main.nr index e1d601b13c9..3e476107c29 100644 --- a/test_programs/execution_success/1327_concrete_in_generic/src/main.nr +++ b/test_programs/execution_success/1327_concrete_in_generic/src/main.nr @@ -10,31 +10,31 @@ struct B { } impl B { - fn new(new_concrete_t_c_constructor: fn () -> T_C) -> B { - B { new_concrete_t_c_constructor } - } + fn new(new_concrete_t_c_constructor: fn() -> T_C) -> B { + B { new_concrete_t_c_constructor } + } - fn get_t_c(self) -> T_C { - let new_concrete_t_c_constructor = self.new_concrete_t_c_constructor; - new_concrete_t_c_constructor() - } + fn get_t_c(self) -> T_C { + let new_concrete_t_c_constructor = self.new_concrete_t_c_constructor; + new_concrete_t_c_constructor() } +} // --- -// Set +// PrivateSet struct C { t_d_interface: MethodInterface, } impl C { - fn new (t_d_interface: MethodInterface) -> Self { - C { t_d_interface } - } + fn new(t_d_interface: MethodInterface) -> Self { + C { t_d_interface } + } - fn call_method_of_t_d(self, t_d: T_D) -> Field { - let some_method_on_t_d = self.t_d_interface.some_method_on_t_d; - some_method_on_t_d(t_d) - } + fn call_method_of_t_d(self, t_d: T_D) -> Field { + let some_method_on_t_d = self.t_d_interface.some_method_on_t_d; + some_method_on_t_d(t_d) } +} // --- struct MethodInterface { some_method_on_t_d: fn(T_D)->Field, @@ -55,7 +55,7 @@ fn get_d_method_interface() -> MethodInterface { // --- fn main(input: Field) -> pub Field { let b: B> = B::new(new_concrete_c_over_d); - let c: C = b.get_t_c(); // Singleton + let c: C = b.get_t_c(); // PrivateMutable let d: D = D { d: input }; // Note let output = c.call_method_of_t_d(d); diff --git a/test_programs/execution_success/5_over/src/main.nr b/test_programs/execution_success/5_over/src/main.nr index f24ff06cb2a..313d580a8d1 100644 --- a/test_programs/execution_success/5_over/src/main.nr +++ b/test_programs/execution_success/5_over/src/main.nr @@ -5,6 +5,6 @@ fn main(mut x: u32, y: u32) { x = std::wrapping_mul(x,x); assert(y == x); - let c: u3 = 2; - assert(c > x as u3); + let c: u1 = 0; + assert(x as u1 > c); } diff --git a/test_programs/execution_success/array_dynamic/src/main.nr b/test_programs/execution_success/array_dynamic/src/main.nr index dde7bacc455..6b51095bd8c 100644 --- a/test_programs/execution_success/array_dynamic/src/main.nr +++ b/test_programs/execution_success/array_dynamic/src/main.nr @@ -2,8 +2,8 @@ fn main( x: [u32; 5], mut z: u32, t: u32, - index: [Field;5], - index2: [Field;5], + index: [Field; 5], + index2: [Field; 5], offset: Field, sublen: Field ) { diff --git a/test_programs/execution_success/array_dynamic_blackbox_input/Nargo.toml b/test_programs/execution_success/array_dynamic_blackbox_input/Nargo.toml new file mode 100644 index 00000000000..03da304acc3 --- /dev/null +++ b/test_programs/execution_success/array_dynamic_blackbox_input/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "array_dynamic_blackbox_input" +type = "bin" +authors = [""] +compiler_version = ">=0.24.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/array_dynamic_blackbox_input/Prover.toml b/test_programs/execution_success/array_dynamic_blackbox_input/Prover.toml new file mode 100644 index 00000000000..cc60eb8a8ba --- 
/dev/null +++ b/test_programs/execution_success/array_dynamic_blackbox_input/Prover.toml @@ -0,0 +1,4 @@ +index = "1" +leaf = ["51", "109", "224", "175", "60", "42", "79", "222", "117", "255", "174", "79", "126", "242", "74", "34", "100", "35", "20", "200", "109", "89", "191", "219", "41", "10", "118", "217", "165", "224", "215", "109"] +path = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63"] +root = [79, 230, 126, 184, 98, 125, 226, 58, 117, 45, 140, 15, 72, 118, 89, 173, 117, 161, 166, 0, 214, 125, 13, 16, 113, 81, 173, 156, 97, 15, 57, 216] diff --git a/test_programs/execution_success/array_dynamic_blackbox_input/src/main.nr b/test_programs/execution_success/array_dynamic_blackbox_input/src/main.nr new file mode 100644 index 00000000000..4cbf1bd8e6d --- /dev/null +++ b/test_programs/execution_success/array_dynamic_blackbox_input/src/main.nr @@ -0,0 +1,27 @@ +fn main(leaf: [u8; 32], path: [u8; 64], index: u32, root: [u8; 32]) { + compute_root(leaf, path, index, root); +} + +fn compute_root(leaf: [u8; 32], path: [u8; 64], _index: u32, root: [u8; 32]) { + let mut current = leaf; + let mut index = _index; + + for i in 0..2 { + let mut hash_input = [0; 64]; + let offset = i * 32; + let is_right = (index & 1) != 0; + let a = if is_right { 32 } else { 0 }; + let b = if is_right { 0 } else { 32 }; + + for j in 0..32 { + hash_input[j + a] = current[j]; + hash_input[j + b] = path[offset + j]; + } + + current = dep::std::hash::sha256(hash_input); + index = index >> 1; + } + + // Regression for issue #4258 + assert(root == current); +} diff --git a/test_programs/execution_success/array_dynamic_main_output/Nargo.toml b/test_programs/execution_success/array_dynamic_main_output/Nargo.toml new file mode 100644 index 00000000000..b202b7aba0a --- /dev/null +++ b/test_programs/execution_success/array_dynamic_main_output/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "array_dynamic_main_output" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/array_dynamic_main_output/Prover.toml b/test_programs/execution_success/array_dynamic_main_output/Prover.toml new file mode 100644 index 00000000000..3f1d55b9b64 --- /dev/null +++ b/test_programs/execution_success/array_dynamic_main_output/Prover.toml @@ -0,0 +1,2 @@ +index = "5" +x = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] diff --git a/test_programs/execution_success/array_dynamic_main_output/src/main.nr b/test_programs/execution_success/array_dynamic_main_output/src/main.nr new file mode 100644 index 00000000000..50feb71f983 --- /dev/null +++ b/test_programs/execution_success/array_dynamic_main_output/src/main.nr @@ -0,0 +1,4 @@ +fn main(mut x: [Field; 10], index: u8) -> pub [Field; 10] { + x[index] = 0; + x +} diff --git a/test_programs/execution_success/array_dynamic_nested_blackbox_input/Nargo.toml b/test_programs/execution_success/array_dynamic_nested_blackbox_input/Nargo.toml new file mode 100644 index 00000000000..07d867d433f --- /dev/null +++ b/test_programs/execution_success/array_dynamic_nested_blackbox_input/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "array_dynamic_nested_blackbox_input" +type = "bin" +authors = [""] 
+compiler_version = ">=0.24.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/array_dynamic_nested_blackbox_input/Prover.toml b/test_programs/execution_success/array_dynamic_nested_blackbox_input/Prover.toml new file mode 100644 index 00000000000..1f291532414 --- /dev/null +++ b/test_programs/execution_success/array_dynamic_nested_blackbox_input/Prover.toml @@ -0,0 +1,23 @@ +y = "3" +hash_result = [50, 53, 90, 252, 105, 236, 223, 30, 135, 229, 193, 172, 51, 139, 8, 32, 188, 104, 151, 115, 129, 168, 27, 71, 203, 47, 40, 228, 89, 177, 129, 100] + +[[x]] +a = "1" +b = ["2", "3", "20"] + +[x.bar] +inner = ["100", "101", "102"] + +[[x]] +a = "4" # idx = 3, flattened start idx = 7 +b = ["5", "6", "21"] # idx = 4, flattened start idx = 8 + +[x.bar] +inner = ["103", "104", "105"] # idx = 5, flattened start idx = 11 + +[[x]] +a = "7" +b = ["8", "9", "22"] + +[x.bar] +inner = ["106", "107", "108"] \ No newline at end of file diff --git a/test_programs/execution_success/array_dynamic_nested_blackbox_input/src/main.nr b/test_programs/execution_success/array_dynamic_nested_blackbox_input/src/main.nr new file mode 100644 index 00000000000..8faaf69dfc8 --- /dev/null +++ b/test_programs/execution_success/array_dynamic_nested_blackbox_input/src/main.nr @@ -0,0 +1,20 @@ +struct Bar { + inner: [u8; 3], +} + +struct Foo { + a: Field, + b: [Field; 3], + bar: Bar, +} + +fn main(mut x: [Foo; 3], y: pub Field, hash_result: pub [u8; 32]) { + // Simple dynamic array set for entire inner most array + x[y - 1].bar.inner = [106, 107, 10]; + let mut hash_input = x[y - 1].bar.inner; + // Make sure that we are passing a dynamic array to the black box function call + // by setting the array using a dynamic index here + hash_input[y - 1] = 0; + let hash = dep::std::hash::sha256(hash_input); + assert_eq(hash, hash_result); +} diff --git a/test_programs/execution_success/array_len/src/main.nr b/test_programs/execution_success/array_len/src/main.nr index b60762f4636..f846cfb9844 100644 --- a/test_programs/execution_success/array_len/src/main.nr +++ b/test_programs/execution_success/array_len/src/main.nr @@ -1,12 +1,12 @@ -fn len_plus_1(array: [T; N]) -> Field { +fn len_plus_1(array: [T; N]) -> u64 { array.len() + 1 } -fn add_lens(a: [T; N], b: [Field; M]) -> Field { +fn add_lens(a: [T; N], b: [Field; M]) -> u64 { a.len() + b.len() } -fn nested_call(b: [Field; N]) -> Field { +fn nested_call(b: [Field; N]) -> u64 { len_plus_1(b) } diff --git a/test_programs/execution_success/assert_statement_recursive/Nargo.toml b/test_programs/execution_success/assert_statement_recursive/Nargo.toml new file mode 100644 index 00000000000..2a5b02cad00 --- /dev/null +++ b/test_programs/execution_success/assert_statement_recursive/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "assert_statement_recursive" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/regression_2854/Prover.toml b/test_programs/execution_success/assert_statement_recursive/Prover.toml similarity index 50% rename from test_programs/execution_success/regression_2854/Prover.toml rename to test_programs/execution_success/assert_statement_recursive/Prover.toml index 07890234a19..5d1dc99124f 100644 --- a/test_programs/execution_success/regression_2854/Prover.toml +++ b/test_programs/execution_success/assert_statement_recursive/Prover.toml @@ -1 +1,2 @@ x = "3" +y = "3" diff --git 
a/test_programs/execution_success/assert_statement_recursive/src/main.nr b/test_programs/execution_success/assert_statement_recursive/src/main.nr new file mode 100644 index 00000000000..d89ea3d35bb --- /dev/null +++ b/test_programs/execution_success/assert_statement_recursive/src/main.nr @@ -0,0 +1,11 @@ +// Tests a very simple program. +// +// The features being tested is assertion +// This is the same as the `assert_statement` test except we specify +// that the backend should use a prover which will construct proofs +// friendly to recursive verification in another SNARK. +#[recursive] +fn main(x: Field, y: pub Field) { + assert(x == y, "x and y are not equal"); + assert_eq(x, y, "x and y are not equal"); +} diff --git a/test_programs/compile_success_empty/comptime_sort/Nargo.toml b/test_programs/execution_success/bigint/Nargo.toml similarity index 69% rename from test_programs/compile_success_empty/comptime_sort/Nargo.toml rename to test_programs/execution_success/bigint/Nargo.toml index 7d215a22496..eee0920f188 100644 --- a/test_programs/compile_success_empty/comptime_sort/Nargo.toml +++ b/test_programs/execution_success/bigint/Nargo.toml @@ -1,5 +1,6 @@ [package] -name = "comptime_sort" +name = "bigint" type = "bin" authors = [""] + [dependencies] diff --git a/test_programs/execution_success/bigint/Prover.toml b/test_programs/execution_success/bigint/Prover.toml new file mode 100644 index 00000000000..c50874a8613 --- /dev/null +++ b/test_programs/execution_success/bigint/Prover.toml @@ -0,0 +1,2 @@ +x = [34,3,5,8,4] +y = [44,7,1,8,8] \ No newline at end of file diff --git a/test_programs/execution_success/bigint/src/main.nr b/test_programs/execution_success/bigint/src/main.nr new file mode 100644 index 00000000000..b93fec370e5 --- /dev/null +++ b/test_programs/execution_success/bigint/src/main.nr @@ -0,0 +1,16 @@ +use dep::std::bigint; + +fn main(mut x: [u8; 5], y: [u8; 5]) { + let a = bigint::Secpk1Fq::from_le_bytes([x[0], x[1], x[2], x[3], x[4]]); + let b = bigint::Secpk1Fq::from_le_bytes([y[0], y[1], y[2], y[3], y[4]]); + let a_bytes = a.to_le_bytes(); + let b_bytes = b.to_le_bytes(); + for i in 0..5 { + assert(a_bytes[i] == x[i]); + assert(b_bytes[i] == y[i]); + } + + let d = a * b - b; + let d1 = bigint::Secpk1Fq::from_le_bytes(597243850900842442924.to_le_bytes(10)); + assert(d1 == d); +} diff --git a/test_programs/execution_success/bit_not/Nargo.toml b/test_programs/execution_success/bit_not/Nargo.toml new file mode 100644 index 00000000000..e89a338595b --- /dev/null +++ b/test_programs/execution_success/bit_not/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "bit_not" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] diff --git a/test_programs/execution_success/bit_not/Prover.toml b/test_programs/execution_success/bit_not/Prover.toml new file mode 100644 index 00000000000..b4bcbcec177 --- /dev/null +++ b/test_programs/execution_success/bit_not/Prover.toml @@ -0,0 +1 @@ +four_as_u32 = 4 diff --git a/test_programs/execution_success/bit_not/src/main.nr b/test_programs/execution_success/bit_not/src/main.nr new file mode 100644 index 00000000000..30b78d330ce --- /dev/null +++ b/test_programs/execution_success/bit_not/src/main.nr @@ -0,0 +1,8 @@ +fn main(four_as_u32: u32) { + let four_as_u8: u8 = 4; + let not_four_as_u8: u8 = !four_as_u8; + assert_eq(not_four_as_u8, 251); + + let not_four_as_u32: u32 = !four_as_u32; + assert_eq(not_four_as_u32, 4294967291); +} diff --git a/test_programs/execution_success/bit_shifts_comptime/src/main.nr 
b/test_programs/execution_success/bit_shifts_comptime/src/main.nr index 9bb1028173d..9184b5bd5e6 100644 --- a/test_programs/execution_success/bit_shifts_comptime/src/main.nr +++ b/test_programs/execution_success/bit_shifts_comptime/src/main.nr @@ -14,7 +14,7 @@ fn main(x: u64) { //regression for 3481 assert(x << 63 == 0); - assert_eq((1 as u56) << (32 as u56), 0x0100000000); + assert_eq((1 as u64) << (32 as u64), 0x0100000000); } fn regression_2250() { diff --git a/test_programs/execution_success/bit_shifts_runtime/src/main.nr b/test_programs/execution_success/bit_shifts_runtime/src/main.nr index 33d68765598..28b3ef656c1 100644 --- a/test_programs/execution_success/bit_shifts_runtime/src/main.nr +++ b/test_programs/execution_success/bit_shifts_runtime/src/main.nr @@ -16,4 +16,5 @@ fn main(x: u64, y: u64) { assert(a << 7 == -128); assert(a << -a == -2); + assert(x >> x == 0); } diff --git a/test_programs/execution_success/brillig_assert/src/main.nr b/test_programs/execution_success/brillig_assert/src/main.nr index 91e4cebd9d3..16fe7b29061 100644 --- a/test_programs/execution_success/brillig_assert/src/main.nr +++ b/test_programs/execution_success/brillig_assert/src/main.nr @@ -6,7 +6,7 @@ fn main(x: Field) { } unconstrained fn conditional(x: bool) -> Field { - assert(x, "x is false"); - assert_eq(x, true, "x is false"); + assert(x, f"Expected x to be false but got {x}"); + assert_eq(x, true, f"Expected x to be false but got {x}"); 1 } diff --git a/test_programs/execution_success/brillig_bit_shifts_runtime/Nargo.toml b/test_programs/execution_success/brillig_bit_shifts_runtime/Nargo.toml new file mode 100644 index 00000000000..ed8200d8a95 --- /dev/null +++ b/test_programs/execution_success/brillig_bit_shifts_runtime/Nargo.toml @@ -0,0 +1,6 @@ +[package] +name = "brillig_bit_shifts_runtime" +type = "bin" +authors = [""] + +[dependencies] diff --git a/test_programs/execution_success/brillig_bit_shifts_runtime/Prover.toml b/test_programs/execution_success/brillig_bit_shifts_runtime/Prover.toml new file mode 100644 index 00000000000..98d8630792e --- /dev/null +++ b/test_programs/execution_success/brillig_bit_shifts_runtime/Prover.toml @@ -0,0 +1,2 @@ +x = 64 +y = 1 \ No newline at end of file diff --git a/test_programs/execution_success/brillig_bit_shifts_runtime/src/main.nr b/test_programs/execution_success/brillig_bit_shifts_runtime/src/main.nr new file mode 100644 index 00000000000..f22166b5993 --- /dev/null +++ b/test_programs/execution_success/brillig_bit_shifts_runtime/src/main.nr @@ -0,0 +1,20 @@ +unconstrained fn main(x: u64, y: u64) { + // runtime shifts on compile-time known values + assert(64 << y == 128); + assert(64 >> y == 32); + // runtime shifts on runtime values + assert(x << y == 128); + assert(x >> y == 32); + + // Bit-shift with signed integers + let mut a :i8 = y as i8; + let mut b: i8 = x as i8; + assert(b << 1 == -128); + assert(b >> 2 == 16); + assert(b >> a == 32); + a = -a; + assert(a << 7 == -128); + assert(a << -a == -2); + + assert(x >> x == 0); +} diff --git a/test_programs/execution_success/brillig_cow/src/main.nr b/test_programs/execution_success/brillig_cow/src/main.nr index 7d847e085fe..52ce8b8be3c 100644 --- a/test_programs/execution_success/brillig_cow/src/main.nr +++ b/test_programs/execution_success/brillig_cow/src/main.nr @@ -10,42 +10,37 @@ struct ExecutionResult { impl ExecutionResult { fn is_equal(self, other: ExecutionResult) -> bool { - (self.original == other.original) & - (self.modified_once == other.modified_once) & - (self.modified_twice == 
other.modified_twice) + (self.original == other.original) + & (self.modified_once == other.modified_once) + & (self.modified_twice == other.modified_twice) } } fn modify_in_inlined_constrained(original: [Field; ARRAY_SIZE], index: u64) -> ExecutionResult { let mut modified = original; - + modified[index] = 27; let modified_once = modified; modified[index+1] = 27; - ExecutionResult { - original, - modified_once, - modified_twice: modified - } + ExecutionResult { original, modified_once, modified_twice: modified } } -unconstrained fn modify_in_unconstrained(original: [Field; ARRAY_SIZE], index: u64) -> ExecutionResult { +unconstrained fn modify_in_unconstrained( + original: [Field; ARRAY_SIZE], + index: u64 +) -> ExecutionResult { let mut modified = original; - + modified[index] = 27; let modified_once = modified; modified[index+1] = 27; - ExecutionResult { - original, - modified_once, - modified_twice: modified - } + ExecutionResult { original, modified_once, modified_twice: modified } } unconstrained fn main(original: [Field; ARRAY_SIZE], index: u64, expected_result: ExecutionResult) { diff --git a/test_programs/execution_success/brillig_cow_regression/Nargo.toml b/test_programs/execution_success/brillig_cow_regression/Nargo.toml new file mode 100644 index 00000000000..c5bf60a1e78 --- /dev/null +++ b/test_programs/execution_success/brillig_cow_regression/Nargo.toml @@ -0,0 +1,6 @@ +[package] +name = "brillig_cow_regression" +type = "bin" +authors = [""] + +[dependencies] diff --git a/test_programs/execution_success/brillig_cow_regression/Prover.toml b/test_programs/execution_success/brillig_cow_regression/Prover.toml new file mode 100644 index 00000000000..44813823448 --- /dev/null +++ b/test_programs/execution_success/brillig_cow_regression/Prover.toml @@ -0,0 +1,229 @@ +[kernel_data] +encrypted_logs_hash = [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", +] +new_note_hashes = [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", +] +new_l2_to_l1_msgs = [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", +] +new_nullifiers = [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", +] +unencrypted_logs_hash = [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", +] + +[[kernel_data.new_contracts]] +contract_address = "0x0000000000000000000000000000000000000000000000000000000000000000" +portal_contract_address = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = 
"0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" + +[[kernel_data.public_data_update_requests]] +leaf_slot = "0x0000000000000000000000000000000000000000000000000000000000000000" +new_value = "0x0000000000000000000000000000000000000000000000000000000000000000" +old_value = "0x0000000000000000000000000000000000000000000000000000000000000000" diff --git a/test_programs/execution_success/brillig_cow_regression/src/main.nr b/test_programs/execution_success/brillig_cow_regression/src/main.nr new file mode 100644 index 00000000000..ba51548d9dd --- /dev/null +++ b/test_programs/execution_success/brillig_cow_regression/src/main.nr @@ -0,0 +1,178 @@ +// Tests a performance regression found in aztec-packages with brillig cow optimization + +global MAX_NEW_NOTE_HASHES_PER_TX: u64 = 64; +global MAX_NEW_NULLIFIERS_PER_TX: u64 = 64; +global MAX_NEW_L2_TO_L1_MSGS_PER_TX: u64 = 2; +global MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX: u64 = 16; +global MAX_NEW_CONTRACTS_PER_TX: u64 = 1; +global NUM_ENCRYPTED_LOGS_HASHES_PER_TX: u64 = 1; +global NUM_UNENCRYPTED_LOGS_HASHES_PER_TX: u64 = 1; +global NUM_FIELDS_PER_SHA256 = 2; +global CALLDATA_HASH_INPUT_SIZE = 169; +global CALL_DATA_HASH_LOG_FIELDS = 4; +global CALL_DATA_HASH_FULL_FIELDS = 165; + +struct PublicDataUpdateRequest { + leaf_slot : Field, + old_value : Field, + new_value : Field +} + +struct NewContractData { + contract_address: Field, + portal_contract_address: Field, +} + +impl NewContractData { + fn hash(self) -> Field { 
+ dep::std::hash::pedersen_hash([self.contract_address, self.portal_contract_address]) + } +} + +struct DataToHash { + new_note_hashes: [Field; MAX_NEW_NOTE_HASHES_PER_TX], + new_nullifiers: [Field; MAX_NEW_NULLIFIERS_PER_TX], + public_data_update_requests: [PublicDataUpdateRequest; MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX], + new_l2_to_l1_msgs: [Field; MAX_NEW_L2_TO_L1_MSGS_PER_TX], + encrypted_logs_hash: [Field; NUM_FIELDS_PER_SHA256], + unencrypted_logs_hash: [Field; NUM_FIELDS_PER_SHA256], + new_contracts: [NewContractData; MAX_NEW_CONTRACTS_PER_TX], +} + +struct U256 { + // This is in big-endian order, typically because + // sha256 is usually in big endian order. + // Note: this means that inner[0] has the most significant 64 bits. + inner : [u64; 4] +} + +impl U256 { + pub fn from_bytes32(bytes: [u8; 32]) -> U256 { + // We use addition rather than a bitwise OR as the bitshifts ensure that none of the bytes overlap each other. + let high_0 = ((bytes[0] as u64) << 56) + + ((bytes[1] as u64) << 48) + + ((bytes[2] as u64) << 40) + + ((bytes[3] as u64) << 32) + + ((bytes[4] as u64) << 24) + + ((bytes[5] as u64) << 16) + + ((bytes[6] as u64) << 8) + + (bytes[7] as u64); + + let high_1 = ((bytes[8] as u64) << 56) + + ((bytes[9] as u64) << 48) + + ((bytes[10] as u64) << 40) + + ((bytes[11] as u64) << 32) + + ((bytes[12] as u64) << 24) + + ((bytes[13] as u64) << 16) + + ((bytes[14] as u64) << 8) + + (bytes[15] as u64); + + let low_0 = ((bytes[16] as u64) << 56) + + ((bytes[17] as u64) << 48) + + ((bytes[18] as u64) << 40) + + ((bytes[19] as u64) << 32) + + ((bytes[20] as u64) << 24) + + ((bytes[21] as u64) << 16) + + ((bytes[22] as u64) << 8) + + (bytes[23] as u64); + + let low_1 = ((bytes[24] as u64) << 56) + + ((bytes[25] as u64) << 48) + + ((bytes[26] as u64) << 40) + + ((bytes[27] as u64) << 32) + + ((bytes[28] as u64) << 24) + + ((bytes[29] as u64) << 16) + + ((bytes[30] as u64) << 8) + + (bytes[31] as u64); + + U256 { inner: [high_0, high_1, low_0, low_1] } + } + + pub fn to_u128_limbs(self) -> [Field; 2] { + let two_pow_64 = 2.pow_32(64); + + let high = (self.inner[0] as Field) * two_pow_64 + self.inner[1] as Field; + let low = (self.inner[2] as Field) * two_pow_64 + self.inner[3] as Field; + + [high, low] + } +} + +unconstrained fn main(kernel_data: DataToHash) -> pub [Field; NUM_FIELDS_PER_SHA256] { + let mut calldata_hash_inputs = [0; CALLDATA_HASH_INPUT_SIZE]; + + let new_note_hashes = kernel_data.new_note_hashes; + let new_nullifiers = kernel_data.new_nullifiers; + let public_data_update_requests = kernel_data.public_data_update_requests; + let newL2ToL1msgs = kernel_data.new_l2_to_l1_msgs; + let encryptedLogsHash = kernel_data.encrypted_logs_hash; + let unencryptedLogsHash = kernel_data.unencrypted_logs_hash; + + let mut offset = 0; + + for j in 0..MAX_NEW_NOTE_HASHES_PER_TX { + calldata_hash_inputs[offset + j] = new_note_hashes[j]; + } + offset += MAX_NEW_NOTE_HASHES_PER_TX ; + + for j in 0..MAX_NEW_NULLIFIERS_PER_TX { + calldata_hash_inputs[offset + j] = new_nullifiers[j]; + } + offset += MAX_NEW_NULLIFIERS_PER_TX ; + + for j in 0..MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX { + calldata_hash_inputs[offset + j * 2] = + public_data_update_requests[j].leaf_slot; + calldata_hash_inputs[offset + j * 2 + 1] = + public_data_update_requests[j].new_value; + } + offset += MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX * 2; + + for j in 0..MAX_NEW_L2_TO_L1_MSGS_PER_TX { + calldata_hash_inputs[offset + j] = newL2ToL1msgs[j]; + } + offset += MAX_NEW_L2_TO_L1_MSGS_PER_TX; + + let contract_leaf = 
kernel_data.new_contracts[0]; + calldata_hash_inputs[offset] = contract_leaf.hash(); + + offset += MAX_NEW_CONTRACTS_PER_TX; + + let new_contracts = kernel_data.new_contracts; + calldata_hash_inputs[offset] = new_contracts[0].contract_address; + + calldata_hash_inputs[offset + 1] = new_contracts[0].portal_contract_address; + + offset += MAX_NEW_CONTRACTS_PER_TX * 2; + + for j in 0..NUM_FIELDS_PER_SHA256 { + calldata_hash_inputs[offset + j] = encryptedLogsHash[j]; + } + + offset += NUM_ENCRYPTED_LOGS_HASHES_PER_TX * NUM_FIELDS_PER_SHA256; + + for j in 0..NUM_FIELDS_PER_SHA256 { + calldata_hash_inputs[offset + j] = unencryptedLogsHash[j]; + } + + offset += NUM_UNENCRYPTED_LOGS_HASHES_PER_TX * NUM_FIELDS_PER_SHA256; + assert_eq(offset, CALLDATA_HASH_INPUT_SIZE); // Sanity check + + let mut hash_input_flattened = [0; CALL_DATA_HASH_FULL_FIELDS * 32 + CALL_DATA_HASH_LOG_FIELDS * 16]; + for offset in 0..CALL_DATA_HASH_FULL_FIELDS { + let input_as_bytes = calldata_hash_inputs[offset].to_be_bytes(32); + for byte_index in 0..32 { + hash_input_flattened[offset * 32 + byte_index] = input_as_bytes[byte_index]; + } + } + + for log_field_index in 0..CALL_DATA_HASH_LOG_FIELDS { + let input_as_bytes = calldata_hash_inputs[CALL_DATA_HASH_FULL_FIELDS + log_field_index].to_be_bytes(16); + for byte_index in 0..16 { + hash_input_flattened[CALL_DATA_HASH_FULL_FIELDS * 32 + log_field_index * 16 + byte_index] = input_as_bytes[byte_index]; + } + } + + let sha_digest = dep::std::hash::sha256(hash_input_flattened); + U256::from_bytes32(sha_digest).to_u128_limbs() +} diff --git a/test_programs/execution_success/brillig_fns_as_values/Prover.toml b/test_programs/execution_success/brillig_fns_as_values/Prover.toml index 11497a473bc..4dd6b405159 100644 --- a/test_programs/execution_success/brillig_fns_as_values/Prover.toml +++ b/test_programs/execution_success/brillig_fns_as_values/Prover.toml @@ -1 +1 @@ -x = "0" +x = "1" diff --git a/test_programs/execution_success/brillig_fns_as_values/src/main.nr b/test_programs/execution_success/brillig_fns_as_values/src/main.nr index 2f5d14583d5..9248bff2f4c 100644 --- a/test_programs/execution_success/brillig_fns_as_values/src/main.nr +++ b/test_programs/execution_success/brillig_fns_as_values/src/main.nr @@ -7,14 +7,14 @@ struct MyStruct { fn main(x: u32) { assert(wrapper(increment, x) == x + 1); assert(wrapper(increment_acir, x) == x + 1); - assert(wrapper(decrement, x) == std::wrapping_sub(x, 1)); + assert(wrapper(decrement, x) == x - 1); assert(wrapper_with_struct(MyStruct { operation: increment }, x) == x + 1); - assert(wrapper_with_struct(MyStruct { operation: decrement }, x) == std::wrapping_sub(x, 1)); + assert(wrapper_with_struct(MyStruct { operation: decrement }, x) == x - 1); // https://github.com/noir-lang/noir/issues/1975 assert(increment(x) == x + 1); } -unconstrained fn wrapper(func: fn (u32) -> u32, param: u32) -> u32 { +unconstrained fn wrapper(func: fn(u32) -> u32, param: u32) -> u32 { func(param) } diff --git a/test_programs/execution_success/brillig_nested_arrays/src/main.nr b/test_programs/execution_success/brillig_nested_arrays/src/main.nr index d0a60ac0a58..5a5657246a8 100644 --- a/test_programs/execution_success/brillig_nested_arrays/src/main.nr +++ b/test_programs/execution_success/brillig_nested_arrays/src/main.nr @@ -12,11 +12,19 @@ unconstrained fn access_nested(notes: [MyNote; 2], x: Field, y: Field) -> Field notes[x].array[y] + notes[y].array[x] + notes[x].plain + notes[y].header.params[x] } -unconstrained fn create_inside_brillig(x: Field, y: Field) 
{ +unconstrained fn create_inside_brillig() -> [MyNote; 2] { let header = Header { params: [1, 2, 3] }; let note0 = MyNote { array: [1, 2], plain: 3, header }; let note1 = MyNote { array: [4, 5], plain: 6, header }; - assert(access_nested([note0, note1], x, y) == (2 + 4 + 3 + 1)); + [note0, note1] +} + +unconstrained fn assert_inside_brillig(notes: [MyNote; 2], x: Field, y: Field) { + assert(access_nested(notes, x, y) == (2 + 4 + 3 + 1)); +} + +unconstrained fn create_and_assert_inside_brillig(x: Field, y: Field) { + assert_inside_brillig(create_inside_brillig(), x, y); } fn main(x: Field, y: Field) { @@ -24,7 +32,10 @@ fn main(x: Field, y: Field) { let note0 = MyNote { array: [1, 2], plain: 3, header }; let note1 = MyNote { array: [4, 5], plain: 6, header }; - create_inside_brillig(x, y); assert(access_nested([note0, note1], x, y) == (2 + 4 + 3 + 1)); + + let notes = create_inside_brillig(); + assert_inside_brillig(notes, x, y); + create_and_assert_inside_brillig(x, y); } diff --git a/test_programs/execution_success/brillig_oracle/Prover.toml b/test_programs/execution_success/brillig_oracle/Prover.toml index 2b26a4ce471..161f4fb62c0 100644 --- a/test_programs/execution_success/brillig_oracle/Prover.toml +++ b/test_programs/execution_success/brillig_oracle/Prover.toml @@ -1,2 +1,2 @@ -x = "10" +_x = "10" diff --git a/test_programs/execution_success/brillig_oracle/src/main.nr b/test_programs/execution_success/brillig_oracle/src/main.nr index 490b7b605e3..6a9e5806621 100644 --- a/test_programs/execution_success/brillig_oracle/src/main.nr +++ b/test_programs/execution_success/brillig_oracle/src/main.nr @@ -2,7 +2,7 @@ use dep::std::slice; use dep::std::test::OracleMock; // Tests oracle usage in brillig/unconstrained functions -fn main(x: Field) { +fn main(_x: Field) { let size = 20; // TODO: Add a method along the lines of `(0..size).to_array()`. 
let mut mock_oracle_response = [0; 20]; @@ -17,7 +17,7 @@ fn main(x: Field) { let _ = OracleMock::mock("get_number_sequence").with_params(size).returns((20, mock_oracle_response)); let _ = OracleMock::mock("get_reverse_number_sequence").with_params(size).returns((20, reversed_mock_oracle_response)); - get_number_sequence_wrapper(size); + get_number_sequence_wrapper(size as Field); } // Define oracle functions which we have mocked above diff --git a/test_programs/execution_success/brillig_scalar_mul/src/main.nr b/test_programs/execution_success/brillig_scalar_mul/src/main.nr index ab2f79eb815..c7c3a85a4ff 100644 --- a/test_programs/execution_success/brillig_scalar_mul/src/main.nr +++ b/test_programs/execution_success/brillig_scalar_mul/src/main.nr @@ -20,4 +20,13 @@ unconstrained fn main( let res = std::scalar_mul::fixed_base_embedded_curve(priv_key, 0); assert(res[0] == pub_x); assert(res[1] == pub_y); + + let pub_point= std::scalar_mul::EmbeddedCurvePoint { x: pub_x, y: pub_y }; + let g1_y = 17631683881184975370165255887551781615748388533673675138860; + let g1= std::scalar_mul::EmbeddedCurvePoint { x: 1, y: g1_y }; + + let res = pub_point.double(); + let double = g1.add(g1); + + assert(double.x == res.x); } diff --git a/test_programs/execution_success/brillig_slices/src/main.nr b/test_programs/execution_success/brillig_slices/src/main.nr index 48bc8a76bb8..847c41de25c 100644 --- a/test_programs/execution_success/brillig_slices/src/main.nr +++ b/test_programs/execution_success/brillig_slices/src/main.nr @@ -131,7 +131,7 @@ unconstrained fn merge_slices_mutate_in_loop(x: Field, y: Field) -> [Field] { let mut slice = [0; 2]; if x != y { for i in 0..5 { - slice = slice.push_back(i); + slice = slice.push_back(i as Field); } } else { slice = slice.push_back(x); diff --git a/test_programs/execution_success/regression_2854/Nargo.toml b/test_programs/execution_success/brillig_wrapping/Nargo.toml similarity index 67% rename from test_programs/execution_success/regression_2854/Nargo.toml rename to test_programs/execution_success/brillig_wrapping/Nargo.toml index fb2b3c42fdd..a52246ba908 100644 --- a/test_programs/execution_success/regression_2854/Nargo.toml +++ b/test_programs/execution_success/brillig_wrapping/Nargo.toml @@ -1,5 +1,5 @@ [package] -name = "regression_2854" +name = "brillig_wrapping" type = "bin" authors = [""] diff --git a/test_programs/execution_success/brillig_wrapping/Prover.toml b/test_programs/execution_success/brillig_wrapping/Prover.toml new file mode 100644 index 00000000000..346fd2764a7 --- /dev/null +++ b/test_programs/execution_success/brillig_wrapping/Prover.toml @@ -0,0 +1,2 @@ +x = 0 +y = 255 diff --git a/test_programs/execution_success/brillig_wrapping/src/main.nr b/test_programs/execution_success/brillig_wrapping/src/main.nr new file mode 100644 index 00000000000..4153a466057 --- /dev/null +++ b/test_programs/execution_success/brillig_wrapping/src/main.nr @@ -0,0 +1,8 @@ +use dep::std; + +unconstrained fn main(x: u8, y: u8) { + assert(std::wrapping_sub(x, 1) == y); + assert(std::wrapping_add(y, 1) == x); + assert(std::wrapping_mul(y, y) == 1); +} + diff --git a/test_programs/execution_success/conditional_regression_661/src/main.nr b/test_programs/execution_success/conditional_regression_661/src/main.nr index 03102eb775e..26521a88358 100644 --- a/test_programs/execution_success/conditional_regression_661/src/main.nr +++ b/test_programs/execution_success/conditional_regression_661/src/main.nr @@ -16,11 +16,11 @@ fn test5(a: u32) { } } -fn issue_661_foo(array: [u32;4], 
b: u32) -> [u32;1] { +fn issue_661_foo(array: [u32; 4], b: u32) -> [u32; 1] { [array[0] + b] } -fn issue_661_bar(a: [u32;4]) -> [u32;4] { +fn issue_661_bar(a: [u32; 4]) -> [u32; 4] { let mut b: [u32; 4] = [0; 4]; b[0]=a[0]+1; b diff --git a/test_programs/execution_success/conditional_regression_underflow/src/main.nr b/test_programs/execution_success/conditional_regression_underflow/src/main.nr index a101af32505..aaf3754a20f 100644 --- a/test_programs/execution_success/conditional_regression_underflow/src/main.nr +++ b/test_programs/execution_success/conditional_regression_underflow/src/main.nr @@ -1,12 +1,12 @@ // Regression test for https://github.com/noir-lang/noir/issues/3493 -fn main(x: u4) { +fn main(x: u8) { if x == 10 { - x + 15; + x + 255; } if x == 9 { - x << 3; + x << 7; } - if x == 8 { + if x == 128 { x * 3; } if x == 7 { diff --git a/test_programs/execution_success/databus/src/main.nr b/test_programs/execution_success/databus/src/main.nr index 61a9637f5fe..1cf95be8a22 100644 --- a/test_programs/execution_success/databus/src/main.nr +++ b/test_programs/execution_success/databus/src/main.nr @@ -1,12 +1,12 @@ use dep::std; -fn main(mut x: u32, y: call_data u32, z: call_data [u32;4]) -> return_data u32 { - let a = z[x]; - a+foo(y) +fn main(mut x: u32, y: call_data u32, z: call_data [u32; 4]) -> return_data u32 { + let a = z[x]; + a + foo(y) } // Use an unconstrained function to force the compiler to avoid inlining unconstrained fn foo(x: u32) -> u32 { - x+1 + x + 1 } diff --git a/test_programs/execution_success/debug_logs/src/main.nr b/test_programs/execution_success/debug_logs/src/main.nr index 52c910065c1..ec24b0cc8e8 100644 --- a/test_programs/execution_success/debug_logs/src/main.nr +++ b/test_programs/execution_success/debug_logs/src/main.nr @@ -1,64 +1,76 @@ -use dep::std; - fn main(x: Field, y: pub Field) { let string = "i: {i}, j: {j}"; - std::println(string); + println(string); + + // TODO: fmtstr cannot be printed + // let fmt_str: fmtstr<14, (Field, Field)> = f"i: {x}, j: {y}"; + // let fmt_fmt_str = f"fmtstr: {fmt_str}, i: {x}"; + // println(fmt_fmt_str); + // A `fmtstr` lets you easily perform string interpolation. 
let fmt_str: fmtstr<14, (Field, Field)> = f"i: {x}, j: {y}"; + let fmt_str = string_identity(fmt_str); - std::println(fmt_str); + println(fmt_str); let fmt_str_no_type = f"i: {x}, j: {y}"; - std::println(fmt_str_no_type); + println(fmt_str_no_type); let fmt_str_generic = string_with_generics(fmt_str_no_type); - std::println(fmt_str_generic); + println(fmt_str_generic); let s = myStruct { y: x, x: y }; - std::println(s); + println(s); - std::println(f"randomstring{x}{x}"); + println(f"randomstring{x}{x}"); let fmt_str = string_with_partial_generics(f"i: {x}, s: {s}"); - std::println(fmt_str); + println(fmt_str); - std::println(x); - std::println([x, y]); + println(x); + println([x, y]); let foo = fooStruct { my_struct: s, foo: 15 }; - std::println(f"s: {s}, foo: {foo}"); + println(f"s: {s}, foo: {foo}"); - std::println(f"x: 0, y: 1"); + println(f"x: 0, y: 1"); let s_2 = myStruct { x: 20, y: 30 }; - std::println(f"s1: {s}, s2: {s_2}"); + println(f"s1: {s}, s2: {s_2}"); let bar = fooStruct { my_struct: s_2, foo: 20 }; - std::println(f"foo1: {foo}, foo2: {bar}"); + println(f"foo1: {foo}, foo2: {bar}"); let struct_string = if x != 5 { f"{foo}" } else { f"{bar}" }; - std::println(struct_string); + println(struct_string); let one_tuple = (1, 2, 3); let another_tuple = (4, 5, 6); - std::println(f"one_tuple: {one_tuple}, another_tuple: {another_tuple}"); - std::println(one_tuple); + println(f"one_tuple: {one_tuple}, another_tuple: {another_tuple}"); + println(one_tuple); let tuples_nested = (one_tuple, another_tuple); - std::println(f"tuples_nested: {tuples_nested}"); - std::println(tuples_nested); + println(f"tuples_nested: {tuples_nested}"); + println(tuples_nested); + regression_2903(); regression_2906(); + let first_array = [1, 2, 3]; + let second_array = [4, 5, 6]; + let arrays_nested = [first_array, second_array]; + println(f"first_array: {first_array}, second_array: {second_array}"); + println(f"arrays_nested: {arrays_nested}"); + let free_lambda = |x| x + 1; let sentinel: u32 = 8888; - std::println(f"free_lambda: {free_lambda}, sentinel: {sentinel}"); - std::println(free_lambda); + println(f"free_lambda: {free_lambda}, sentinel: {sentinel}"); + println(free_lambda); let one = 1; let closured_lambda = |x| x + one; - std::println(f"closured_lambda: {closured_lambda}, sentinel: {sentinel}"); - std::println(closured_lambda); + println(f"closured_lambda: {closured_lambda}, sentinel: {sentinel}"); + println(closured_lambda); } fn string_identity(string: fmtstr<14, (Field, Field)>) -> fmtstr<14, (Field, Field)> { @@ -83,19 +95,30 @@ struct fooStruct { foo: Field, } +fn regression_2903() { + let v : [str<1>; 1] = ["1"; 1]; + println(v); // will print [1] + + let a = v[0]; + println(a); // will print `1` + + let bytes = ["aaa", "bbb", "ccc"]; + println(bytes); +} + fn regression_2906() { let array_two_vals = [1, 2]; - dep::std::println(f"array_two_vals: {array_two_vals}"); + println(f"array_two_vals: {array_two_vals}"); let label_two_vals = "12"; - dep::std::println(f"label_two_vals: {label_two_vals}"); + println(f"label_two_vals: {label_two_vals}"); let array_five_vals = [1, 2, 3, 4, 5]; - dep::std::println(f"array_five_vals: {array_five_vals}"); + println(f"array_five_vals: {array_five_vals}"); let label_five_vals = "12345"; - dep::std::println(f"label_five_vals: {label_five_vals}"); + println(f"label_five_vals: {label_five_vals}"); - dep::std::println(f"array_five_vals: {array_five_vals}, label_five_vals: {label_five_vals}"); + println(f"array_five_vals: {array_five_vals}, label_five_vals: 
{label_five_vals}"); } diff --git a/test_programs/execution_success/distinct_keyword/src/main.nr b/test_programs/execution_success/distinct_keyword/src/main.nr index 0e55a011a48..8e9b5c008ed 100644 --- a/test_programs/execution_success/distinct_keyword/src/main.nr +++ b/test_programs/execution_success/distinct_keyword/src/main.nr @@ -1,4 +1,4 @@ // Example that uses the distinct keyword -fn main(x: pub Field) -> distinct pub [Field;2] { +fn main(x: pub Field) -> distinct pub [Field; 2] { [x + 1, x] } diff --git a/test_programs/execution_success/double_verify_nested_proof/Nargo.toml b/test_programs/execution_success/double_verify_nested_proof/Nargo.toml new file mode 100644 index 00000000000..3ead649c879 --- /dev/null +++ b/test_programs/execution_success/double_verify_nested_proof/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "double_verify_nested_proof" +type = "bin" +authors = [""] +compiler_version = ">=0.24.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/double_verify_nested_proof/Prover.toml b/test_programs/execution_success/double_verify_nested_proof/Prover.toml new file mode 100644 index 00000000000..2a2b4b33586 --- /dev/null +++ b/test_programs/execution_success/double_verify_nested_proof/Prover.toml @@ -0,0 +1,5 @@ +key_hash = "0x13fd5b632ce9e9d12c9ac56c150ed09413df3edf40d1b7ab8ced9f262ec61b29" +proof_b = ["0x0000000000000000000000000000000000000000000000042ab5d6d1986846cf","0x00000000000000000000000000000000000000000000000b75c020998797da78","0x0000000000000000000000000000000000000000000000005a107acb64952eca","0x000000000000000000000000000000000000000000000000000031e97a575e9d","0x00000000000000000000000000000000000000000000000b5666547acf8bd5a4","0x00000000000000000000000000000000000000000000000c410db10a01750aeb","0x00000000000000000000000000000000000000000000000d722669117f9758a4","0x000000000000000000000000000000000000000000000000000178cbf4206471","0x000000000000000000000000000000000000000000000000e91b8a11e7842c38","0x000000000000000000000000000000000000000000000007fd51009034b3357f","0x000000000000000000000000000000000000000000000009889939f81e9c7402","0x0000000000000000000000000000000000000000000000000000f94656a2ca48","0x000000000000000000000000000000000000000000000006fb128b46c1ddb67f","0x0000000000000000000000000000000000000000000000093fe27776f50224bd","0x000000000000000000000000000000000000000000000004a0c80c0da527a081","0x0000000000000000000000000000000000000000000000000001b52c2020d746","0x0000000000000000000000000000004bdfb9b586a637ceebd99ff26dcd3af427","0x0000000000000000000000000000000000265c2a5caf8e033e32d192807f5353","0x000000000000000000000000000000c0ab1db6ea40ac087cdc82c4a61ab00c86","0x0000000000000000000000000000000000010800ea8010f4bd3dd432d1cc11ed","0x000000000000000000000000000000eb3db3c41e3e636d686fd2903b1b913a01","0x000000000000000000000000000000000009bbab6b90377114c9e33d2a302226","0x000000000000000000000000000000758726e60ef4b211cf1c965fe08293365b","0x0000000000000000000000000000000000290ec193bc7f4f9006b9cea136bff5","0x0000000000000000000000000000005a2a389cd1702b3aa37f30ed974147d343","0x00000000000000000000000000000000001d83087d6efe0db3f482730b8d5e32","0x000000000000000000000000000000ad015051ed84c11d061e63eddbc3c0417a","0x0000000000000000000000000000000000155011c8b0167ff694740c48d67683","0x00000000000000000000000000000010c638a3b13dba3e77be3f10a3d096927c","0x00000000000000000000000000000000002372b9853214a1f76e5636dc26f146","0x00000000000000000000000000000005d9ca201c07bd4216689677feb9227715","0x000000000000000000000000000000
000001dcf09921797dffb8eb21abef187b","0x00000000000000000000000000000070af16c9644b777dcf84d69e820e1ed895","0x00000000000000000000000000000000002d5e8f7eb7a4e20964dd94dc141534","0x0000000000000000000000000000003636871dbe453b366c3351be6e84144683","0x0000000000000000000000000000000000206464e290e4f4764365038ac77edf","0x000000000000000000000000000000175c20da35cc833dd542af57de9b62a2da","0x00000000000000000000000000000000001d2e31de3715e05ff6278f88e5a0db","0x000000000000000000000000000000328610e4eabb48be78d3c75f7c159205c5","0x000000000000000000000000000000000026720634b8076fee0a17b358b04653","0x0000000000000000000000000000000e5f48906892ffbff91e8b58ceabba0949","0x000000000000000000000000000000000013c349df687926ccb712622fc72a36","0x000000000000000000000000000000a4b8c9046c7e7e4cc19bbf9a367668eac7","0x00000000000000000000000000000000002a81128e53672c33bb0dae0ff18f41","0x000000000000000000000000000000edb79df57c4a2303ed1e5c2d7ed1e1bdaf","0x000000000000000000000000000000000018d3cea4ce204eafd70c0ded024650","0x000000000000000000000000000000e5f82856854fe0a2d587f6a9ae8555f321","0x0000000000000000000000000000000000235480ec2adc05f04261054345e568","0x00000000000000000000000000000083607465f60b70b092f606853f4d9e96eb","0x000000000000000000000000000000000006569e3a3174bcb71efe46f7fb7e0f","0x000000000000000000000000000000cb4d5fc546f20f63e3b7cf60341956f36f","0x00000000000000000000000000000000000e14b1932630bf606a637eabb7c80f","0x000000000000000000000000000000786f31c2e082aa7e398e6323bb48a27472","0x00000000000000000000000000000000002dd72746f5e5a4a438def122ae6bba","0x000000000000000000000000000000d007be60a28b744e49279fab277c8bd623","0x00000000000000000000000000000000000e52e2b940b9cd8d001209cc40f7c8","0x000000000000000000000000000000dd4357e24a1bda0b5a6c5eee657cfe9091","0x0000000000000000000000000000000000047bb24b20feb0b66089a96671c901","0x0000000000000000000000000000003fe7f42f34e3360ef0fa8bd9c17e6190a3","0x0000000000000000000000000000000000161d17a3848118e91b435b553d34e9","0x216fa2905e105e0c767687f9b5e81c2e4ce03abe2993ac8dcd9e8d89e088966f","0x1288ba942d41c7f4b048e125454253bc7d7ffc0875365c0b8f75a2bb3ea90b42","0x1ad706f84cffcc62fa030f1bc57cb478a687aa74c1019beeda9bab4e40d35373","0x03050c8016b8041a557a46840ab4166a9c2531eb7c3985a447996a334e0caf5f","0x2b3c485da75bdaef8cec120bd08bc21e3ff717740114d13d3811006215a1fb24","0x008fc8c76c4d8cbba8653bf0919c047d379941be60c7afc7250bc6bfc5f29ad5","0x1993ae2a0da54e5e643533fdefbf54a0df21115b2ee79a63a7f477c2c9c4a5d5","0x22520fa7fde2d72b9776c07c9b897ef7ce48f8a7937ec0cacb01d3e23f72b78a","0x259b7b9c1dbfe88d613102f0e8548f0c770a1c83876b26a5cb4b6790740cb487","0x043006102e519b8011d089f51811337fbdedc856a73842f7c8197be176b08d38","0x2222bd509df909ce38b67b3172b24c8ce1e0e1dd0d811f4fae6957e3418415ac","0x1b1204474652fa85979f0274145680718bed80466f4c91ad58f37df1b4fe2395","0x08d57251b42c0697535617ae239d7f3ef9d1558c1bb71fa01c68e7b5fd266139","0x04ca7f21f1d0ba50ecf00c615d18bf8f7291bb04f513cbef78fb6d03ed9b0cb2","0x070ae1119c80846863a4cd971e535ff87fe34473eb5730b14e5b30212b7b78a1","0x1128027ded5032cc265c96ff81d76e2ce06420702fd4e5bc4e24fda695961651","0x1ef7a9e5885b934eee2b44335157309de2f60519e50a8471e5e24495dff2a9fe","0x2d0dad89e5633da796c0c897804575879bc5dc7ad3805b44260943101ac9609e","0x287edcbd60e9d636ba1cd1c9ff3ec2b71b694112c65876525f5e2f8209cd747f","0x24b1157a1cb5bdbd2829de066b8c5573584f9b8638bf9bad476a1fe1172da4b9","0x1f9825731638cd1c43f7cf035b808a1704f122453318cb88fe3b1164f034e170","0x07003a6552f3a6ab1ad3e0717be0af049767b554ff88986c4e48224632523405","0x288002c2ff29874077b2c216a35cb61ecc97d12750a3a86574d50acd42607095",
"0x0a12fc37918ce7dcbd0d354a05bdbb409a8e4530d86f3d8ce07231240590f65c","0x2ec631b05fc693b07286eecf6b6ac1aef0d073cdced8e050244ec7cf4e8f6e42","0x107bc98da225efe7749d51b9966c3edd6c245f2e5cf183a924ba982817e4525a","0x2ca603f77ea0ca42b6f38cd43bc3cc09442906373a2f197fdc976533066ac343","0x138ace5653809375aa9d95240fa9b6508860a471aed70bcc8b7dd52ae34809f3","0x21e1eb924951881c3d0ce5657d2e26a3e5150ce8f49c9e4d0476c5fdf1e43a54","0x2e2daec93f5e94f6784ce569883cf285da12244b38fb001b94bfb99bb4de060c","0x186a8d30c973bef6286115865a690a2528adbeea8376e5221fffeb6a135d9904","0x1e0d9d90628be31ebc16ef1d85d5f9e6fb8cb57e6a74e576f958cf21db45042e","0x124ceb5e1d9da6d0fe163e961643bb0423c926ef4e0c583eaba9e32d99ec6c7c","0x2db34cc38a50bfea50750830710c13b4d80f4ec0e8df8f186047ee36f338eeeb","0x0b174aa403b42235d5bdde8e9f5bb6c52ae62fec2884334cbe3e53418bd2463d","0x1571ebd9c3854c2f63418f206c6937495450ab9238a238b9c63fbf5867378c5b","0x24f92d1ab27e5810e5b7f4b31254526822f866602922258135c5eb5a2b21ca04","0x20cc7f5ba8df67d9c95642e2662654eb2305c6a280ce1747aec88a581ee50647","0x24112b99f63bbda7487709396dff22aae89ae809263021b65503ff7f809c7e38","0x06805c80f64efd1fa7f08382c9981aad9cecad78808da670477566674141bc48","0x146d4801d6f5898051ee0d7c95375a65ea0e6deeac6ffee1d9b9cf64da72dc3e","0x000000000000000000000000000000425b99a5c96b22ba0286d9ebeecf8e4559","0x0000000000000000000000000000000000110be4b8fe46a96303c205d3a1d61d","0x000000000000000000000000000000d9ff7ae757f2f0c91d1f1e71fac1b27b74","0x000000000000000000000000000000000009b0c285f6c221f6eba93b1e330ac4","0x0000000000000000000000000000004055cd5738a25ab1860a1e35555962dc19","0x00000000000000000000000000000000001a8726ccf54e17cf1b005e3e04879a","0x0000000000000000000000000000007be4dc343e9c2e0d4a9156f1ef9769f65a","0x00000000000000000000000000000000002b0e96f68f6509615ca0544dfa3107"] +public_inputs = ["0x0000000000000000000000000000000000000000000000000000000000000003"] +verification_key = 
["0x2260e724844bca5251829353968e4915305258418357473a5c1d597f613f6cbd","0x0000000000000000000000000000000000000000000000000000000000080000","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000080000","0x0000000000000000000000000000000000000000000000000000000000000011","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000003","0x0000000000000000000000000000000000000000000000000000000000000004","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000006","0x0000000000000000000000000000000000000000000000000000000000000007","0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000b","0x000000000000000000000000000000000000000000000000000000000000000c","0x000000000000000000000000000000000000000000000000000000000000000d","0x000000000000000000000000000000000000000000000000000000000000000e","0x000000000000000000000000000000000000000000000000000000000000000f","0x0000000000000000000000000000000000000000000000000000000000000010","0x000000000000000000000000000000ba765ed919550454064328e0fd7c51ff4a","0x00000000000000000000000000000000000418b2f4104c289eb20cb95344c850","0x0000000000000000000000000000006defa500aab13c8cf3c00117db573bef2c","0x000000000000000000000000000000000026dea3ea8fb7e77b5bfa8443397dc7","0x0000000000000000000000000000009a5c33c4054817f3402e68aeca4728a405","0x00000000000000000000000000000000002abf5ea67ec384cb2e5998c7a48b3a","0x000000000000000000000000000000ee78817f7d959ea45abb27404e3371c708","0x0000000000000000000000000000000000260a979e8190a83b0bce1351b92d3c","0x000000000000000000000000000000ec447bd83a83883ce4c11573ce24845c74","0x000000000000000000000000000000000005b23c2076f50d10baa061a67b9200","0x00000000000000000000000000000058ffc16cfb64ec06a56a2b1a9047fb8f0c","0x000000000000000000000000000000000011d2f5833d720e1d0a02749471e7ad","0x000000000000000000000000000000416dd6c8c0d1cbb185b3c3197eac767d0b","0x000000000000000000000000000000000023b9c5a4e525926d64247ec92e0baf","0x000000000000000000000000000000a55f5f52ebc8936a58e413a1068d94d376","0x00000000000000000000000000000000000be3f377ccc88a6cb5df6f230da95e","0x00000000000000000000000000000070a162a08d4d4800f450af94888f8f3480","0x0000000000000000000000000000000000085883b02590372a7b36a1c57db4c3","0x00000000000000000000000000000045b0b661ea73930ee3327ccff8a0ca9ce1","0x00000000000000000000000000000000002854cab8629792eb07e9ef81bc46ee","0x00000000000000000000000000000067f365021e0e42117c43a39419d1d9cc73","0x000000000000000000000000000000000022c370b38f0a97eb3d718146f2284b","0x00000000000000000000000000000016de6670aba605233072b8eecfa9069b06","0x000000000000000000000000000000000002c29c49d66457bcbd4fa5bf6096fd","0x000000000000000000000000000000e32e8ce4f18ba30ce53245044d0c60508a","0x00000000000000000000000000000000001170220489121b8eedd58a4b5599df","0x000000000000000000000000000000139ed828b410380d053ec0f056656f5703","0x0000000000000000000000000000000000072aebdce25ba333c86769adec1362","0x000000000000000000000000000000aa352ee565f91fc2b73323fc824bc14636","0x00000000000000000000000000000000
001f3e272a192808ec9283ee3bb4df4b","0x00000000000000000000000000000005c72c8c88be0259ae226ccb0488452b4b","0x00000000000000000000000000000000001c68407d694502b929b77cbbab8374","0x0000000000000000000000000000003716bda8267f29931ed0aa811e4607f1c6","0x000000000000000000000000000000000007d888936af2141bb2f6823a587e81","0x0000000000000000000000000000004cf1a4f39c5363f70ecc9e433d751ea529","0x00000000000000000000000000000000002e8a81232ec84e48032178f1ee6edb","0x000000000000000000000000000000388e8265061fa0c92c96fc85d99bac7891","0x00000000000000000000000000000000002e3c516222565332e6e7362400bc5f","0x0000000000000000000000000000003a68d13661a0906e5828fe8271a336bf64","0x00000000000000000000000000000000001412d3e67497c98e5ec2aaee8779f5","0x000000000000000000000000000000b5d123498733b5279d8bcbade0d8345ef7","0x00000000000000000000000000000000000fa572890537089a5fb36953e7a1ca","0x0000000000000000000000000000004d8ff057fc9936a693035266c80c6ea57d","0x00000000000000000000000000000000001907a614968d777fcc506f639799f6","0x00000000000000000000000000000010769533212d3cafbf6ac378c8055c33a2","0x00000000000000000000000000000000000eac32851272327acdc0890792dfb7","0x000000000000000000000000000000e3e32f343643d319a977beb0c2b0ab9b31","0x00000000000000000000000000000000000c10c4c9dce6ff648ef70f54d45ba6","0x00000000000000000000000000000025721304165b9b313b94cf2c77b61dc1ef","0x000000000000000000000000000000000024b8083b0f323c2703a7255caa7078","0x0000000000000000000000000000002b860372c65049c88f6532cbd360917b11","0x000000000000000000000000000000000011ee2ac2bc36cdfdc107eca47369f3","0x0000000000000000000000000000001c1b0233882acb5a78a977642e4dce91d5","0x000000000000000000000000000000000020922a70853993b3516eeb01d7c8a4","0x0000000000000000000000000000001f90b5fade69a55a2da8d2db3c62b62d7c","0x0000000000000000000000000000000000173312bb89c6722b548ff87a7487a2","0x0000000000000000000000000000009d618ffd933cf58a8a0953dc76f97cf108","0x00000000000000000000000000000000000ddc3b6d8e59cf0996ca71ad4132ca","0x000000000000000000000000000000ec4c6a253f431d3f3fc06aa0e5b0448b8c","0x0000000000000000000000000000000000153193287060386695f4f2d0d3525d","0x0000000000000000000000000000004bd25585edb9319128045c005d48491b1e","0x00000000000000000000000000000000001170f0ece62f8c572bca96b141d27f","0x0000000000000000000000000000003dd2e37b8edb1f56b785809d7710bf1c88","0x0000000000000000000000000000000000246cd041690f653f88ed0c56ad282a","0x00000000000000000000000000000034bc8a00ce9d452888e5fc2b5a7e14fed7","0x000000000000000000000000000000000026153c937447356a0c6d6be09d85eb","0x000000000000000000000000000000555388ad9364679246b07992f84b4e91b2","0x0000000000000000000000000000000000189da022421fbd8dfd7973084d978e","0x000000000000000000000000000000e8c0f9753e2a5a35acec051fafe2cecce5","0x0000000000000000000000000000000000285311c5e9a4cbb56a3f04f29d5443","0x00000000000000000000000000000092d2d0ac76a1be7f1fad96cbd997175312","0x00000000000000000000000000000000002436400260c9d3180beedd0bf49fec","0x000000000000000000000000000000887d86d95387bbb29616cc5c41ee4a2669","0x0000000000000000000000000000000000063bf32f8addf7a3e1cf6cd223cb71","0x000000000000000000000000000000d841dc7d9da6cc699e8377b2a04723fea0","0x00000000000000000000000000000000002ce091428268c212a2bcfea0edb338","0x00000000000000000000000000000012fe4771092fa47e4d6050701527133f09","0x00000000000000000000000000000000002f36672865c5ae4976486fdaf2d81d","0x0000000000000000000000000000008e6bced56a3d94dfe9d476da3a424b8eff","0x00000000000000000000000000000000002d6303cf28aa721f4e5348a0d83642","0x0000000000000000000000000000008c5807dace05b2079d200f7f71caffdaf7","0
x000000000000000000000000000000000008f7beb50cb16f3b6210aff1bdb05d","0x0000000000000000000000000000004f9ee08a49536eb54a238b982c4dfd5446","0x000000000000000000000000000000000014f55e7065eabacf1a7d6cbf1f6765","0x00000000000000000000000000000021150153ec654b02a66d9bea056185877e","0x00000000000000000000000000000000000e7bf50a142b21057bcfd340a5e77c","0x00000000000000000000000000000038110629263a662f10464b375f988cccda","0x00000000000000000000000000000000001964a0ab814f71282cd159df492710","0x000000000000000000000000000000b9310dd49ea52ba735b9654ebced7bc67b","0x000000000000000000000000000000000019ad72f92554ce44921ca3f420f995","0x000000000000000000000000000000d67d7e81fa6e1cdfae6d84510a8cb7e257","0x00000000000000000000000000000000000a6ec9d85c10a85e8f31eaedb4e459"] +proof = ["0x0000000000000000000000000000000000000000000000042ab5d6d1986846cf","0x00000000000000000000000000000000000000000000000b75c020998797da78","0x0000000000000000000000000000000000000000000000005a107acb64952eca","0x000000000000000000000000000000000000000000000000000031e97a575e9d","0x00000000000000000000000000000000000000000000000b5666547acf8bd5a4","0x00000000000000000000000000000000000000000000000c410db10a01750aeb","0x00000000000000000000000000000000000000000000000d722669117f9758a4","0x000000000000000000000000000000000000000000000000000178cbf4206471","0x000000000000000000000000000000000000000000000000e91b8a11e7842c38","0x000000000000000000000000000000000000000000000007fd51009034b3357f","0x000000000000000000000000000000000000000000000009889939f81e9c7402","0x0000000000000000000000000000000000000000000000000000f94656a2ca48","0x000000000000000000000000000000000000000000000006fb128b46c1ddb67f","0x0000000000000000000000000000000000000000000000093fe27776f50224bd","0x000000000000000000000000000000000000000000000004a0c80c0da527a081","0x0000000000000000000000000000000000000000000000000001b52c2020d746","0x00000000000000000000000000000063cb03b1d83ae3942e11ca8ec63055898b","0x00000000000000000000000000000000001edaf70d547a857fbed6a9ff8a38c9","0x000000000000000000000000000000097fb881332193ff4489e213f600e6a007","0x00000000000000000000000000000000001f2903742639c3595d22b96d4d9c21","0x000000000000000000000000000000bca7215bb1bcdde52ed9cf845b7e54072d","0x0000000000000000000000000000000000188bd12b19073eb01e8be5bda41b3e","0x0000000000000000000000000000007d1a114656606c391bfb286ea4e14062a5","0x000000000000000000000000000000000026d8a3b8821da41b6b1d6b85872260","0x000000000000000000000000000000c49078b857741b82cba39d8a394c1876c1","0x00000000000000000000000000000000002f9b9f76f80a4ff456e60c024f8d03","0x0000000000000000000000000000004bab3e60680935219213ea32be70ec5100","0x00000000000000000000000000000000002c45bda56f0115cfde2678889694ab","0x0000000000000000000000000000006434e56313172088d5a6b10fdd1b94b4ca","0x000000000000000000000000000000000007ad41e7980534fc2f89e8ad7366ad","0x00000000000000000000000000000023d769c68ef65f0b4f06a01e655fb265e7","0x0000000000000000000000000000000000008d3b5d5b201ed6773c369fe20d10","0x0000000000000000000000000000005eacdd2121ba4b1cf0df09632df6991fcf","0x0000000000000000000000000000000000005e98e857c8c1eb16cef913e44f90","0x0000000000000000000000000000003449da35dc7c0b67b0c3e99ced603ea381","0x000000000000000000000000000000000022347c8daec6739b183413a787fd13","0x000000000000000000000000000000df23d8f1ac4ddfced428737db15e63f603","0x000000000000000000000000000000000015e03670ba72d84269d764d8f8e725","0x000000000000000000000000000000457a7f854dbab545c8c94ccdb8e4b9ad45","0x00000000000000000000000000000000000a268fc41b7031912cec59dc0a7078","0x000000000000000000000000
00000022fcb55824b67af33225f8f2e614fbbdb4","0x0000000000000000000000000000000000235f698e6aee7bf8ca94f4a44db006","0x000000000000000000000000000000a327da390bd3e01e4a7b639605fdfd9c42","0x0000000000000000000000000000000000210196c4fb53d660a3824867b2b1c5","0x000000000000000000000000000000728fb44750fa2b956221bd441fa61e32d6","0x0000000000000000000000000000000000073db9e2cafdf0fe22b5090855533e","0x0000000000000000000000000000004fe310e93730876891eebab46db9496dbc","0x000000000000000000000000000000000007d3574fe79c87011abdbd51a46670","0x000000000000000000000000000000adc522f42e085c51403fc50c83f35904b9","0x00000000000000000000000000000000000d2d9ef8fc0031b4568842a99b34eb","0x00000000000000000000000000000098586d928c8abc7cc56d571c8eded52168","0x000000000000000000000000000000000024279c001a40e94d3d149ec01a468a","0x00000000000000000000000000000066122aaf47d9d5060a2ce1d17cc5201be0","0x00000000000000000000000000000000001c21031d83d52e27867a611229d2ca","0x000000000000000000000000000000838dfc066499f7715682f755b42f3a4869","0x00000000000000000000000000000000001f816d2c5b2e903496f1443cb91de3","0x0000000000000000000000000000007ef917b6df805f430f8a0833942a7c3094","0x00000000000000000000000000000000000a9cefe716f31dbe37485179d60f0e","0x00000000000000000000000000000028adb1040bd0c07448de51d5cac9fd0495","0x00000000000000000000000000000000000c66b25a22c8b3ba82ec09ab4bdef3","0x2cc791d253f03f47cc88f7f0aeae481762f4aa6426712772544aaeca72466cb7","0x14197950f448f679eeff75c4e83dac9f0ebd5aa194709ea3875fb4e4b15bc2f2","0x1a92022c2ed8f8a41e3f392e22f1875f6916543bbb22c3aaf50d703de649c381","0x2ee77a26e78d5e1093dabd3612beee4b515a4f159992138e13ecd3f0afcfba18","0x2c280cba627b147142a2d333ee856000298708f9b5df0cc8d23c26d0936d6869","0x1b2569bb6f6b60b6f743ff892a39a490770d4ad40a961a06149d4968b0487a40","0x2f80351e43621d69b7e620338b2822e15dec9e6a2de16e8d04bb559153cd53a3","0x15a78b8ae9b3be431b609250b69c7cb746c6a689b2122150f258c6f7d67409fc","0x1334c47f273be542576813933e89a9130a342846272b39a2eab3ab7fc022d5fe","0x1031bdcafc5c0dad81c8b6c4931c9b442cd0c8a0bb9a729cc2f6bf0a18dc1b82","0x177f92f0cef76c5c45f55d16fa2be426354cdd4af6ac9aaad479c9b47f88656d","0x0064c0e0ec8984d612189e5287d59eedc1a6de52fc78bf72028f744350c27a0e","0x2c06222cf0d415c976e6904f1706b77cf438636ada3222e1c31c4957d6877dac","0x173da534b7001f44f19bb3e3f8601ac94fbf90b2e39b7d4079d8fac2d65102ea","0x012909bcdbd1167010cf0084028e851f3448f58946f4951b1b8d544d86b138c8","0x2975c3987f110c06bd8ced1d8bb0d398ac72c6f196ea639bdde58fa4b899d4a0","0x05c196fb2f6ccfd92a38ae526af85bccc3695ea0e2561e7a211c60360187602d","0x18a288590dd0cbfe5b7652458c9caddc9eac2f08e5822b64141ed1b4e805bda3","0x0cd08c41605b22a7ae31c3961486f645a32bff0ccaef63b6d661ef356db78560","0x05d5e48184693259f722f84ea48f9b84667d1e9db19e1381b2279fe24b01484b","0x2187a6f6a2398e5f0137880a983ff6b682b5a7c2b62e4bdfff6ff6becd0d53ab","0x1d4764ca9346e8ac48675320521e0daba651f480efe932302e8a9673580fc0d8","0x00cfcb920adeb0293acf26e63aeac4489622e4c806b93f1c72f8491cba3d0196","0x1bcd6a556800b8385ba1250afd69999fe2bb5518a6ba2cc461a4afba21ffbedb","0x11a15b3c8ef0e4ac0ff151fba72b922f6c005519151a4f88557352265944aeea","0x063d550a154f2ce80b08fb169d137fa96dcea6a6c489e98e1390aa9a5db18928","0x25da993132041b9f667de044194f5c6b0cdae961cdea5f6dbbda8595f213ac08","0x22fcecc2e3794814bbb84700031cd75ec9817201c8c88df2e86407a14412f902","0x01583d25d2f91d646da02a520d3dbf758b0a0590a533bd1417a717fd0cd18915","0x18ebacffdc81e15547232dfc1a0e31ec2848a1e5b9c8509a92432c2549d93091","0x20a3d15aa70d04a841802fe1d990f56c6b9e6eadc17da2c0dfd2a817e6cf0430","0x0b497cc2e54412ce07c52effdce6c01de2c1a0e1d095a2a37f5351232400
c0a1","0x14419bb69d02675b8d58e60ce88a2f4b6a43674461e4015e2e302285a42c5784","0x0c84db03ff77d0729bb68eab2d6d697b7caebd4ea3db781499492a6f0ef67765","0x1a676b1c6b0ab1c85b31af681e05751296c3d0a1a883668f5fe971827ce86fc9","0x08da949bf7603bfe20f3c152abe727051c6306cff322197e8fa56b390f565b5b","0x1fd77e041239f94e907dc3ae3069a70cbff726b9d8b3a368a4910c8a070a9c9a","0x03755d83a4f0fdfbb4fd1b2b465842e1bb707a419c2952a2ca9faba50d4be379","0x0ee90c8166adcb238d85c72a85db2248353610c55390a2ed54e59dd1c35c12d2","0x170bcd78efaa1b19bcfd065c2ec60b48aa1e62465df73e62f3bd291115315144","0x015d60e5cc5c7d67853993261bd9e3c6e56f95dee8724ce79c7601ee10c1a731","0x000000000000000000000000000000f0a8b99d65fc1555bafb688233a6489aea","0x0000000000000000000000000000000000043849f038ec96c8c1c6e242351361","0x0000000000000000000000000000001ad41d3dfebb280623d5b325f0a7aa38f7","0x00000000000000000000000000000000002e5f2119536daa9e6d1f9b82b797dd","0x000000000000000000000000000000e5570c2b6e74d0994e2fc8be1a9dab4160","0x00000000000000000000000000000000002ed426a78ed52d4c13f2c651a6d4ec","0x000000000000000000000000000000aba14637487e4d3ca30dc397416696c85c","0x000000000000000000000000000000000005ae1eb3eee0cdf5e5c7bb0ac9be07"] diff --git a/test_programs/execution_success/double_verify_nested_proof/src/main.nr b/test_programs/execution_success/double_verify_nested_proof/src/main.nr new file mode 100644 index 00000000000..0466f2a226d --- /dev/null +++ b/test_programs/execution_success/double_verify_nested_proof/src/main.nr @@ -0,0 +1,28 @@ +use dep::std; + +fn main( + verification_key: [Field; 114], + // This is the proof without public inputs attached. + // + // This means: the size of this does not change with the number of public inputs. + proof: [Field; 109], + public_inputs: pub [Field; 1], + // This is currently not public. It is fine given that the vk is a part of the circuit definition. + // I believe we want to eventually make it public too though. + key_hash: Field, + proof_b: [Field; 109] +) { + std::verify_proof( + verification_key.as_slice(), + proof.as_slice(), + public_inputs.as_slice(), + key_hash + ); + + std::verify_proof( + verification_key.as_slice(), + proof_b.as_slice(), + public_inputs.as_slice(), + key_hash + ); +} diff --git a/test_programs/execution_success/double_verify_proof/src/main.nr b/test_programs/execution_success/double_verify_proof/src/main.nr index ce087dc4e61..e4c6926efbc 100644 --- a/test_programs/execution_success/double_verify_proof/src/main.nr +++ b/test_programs/execution_success/double_verify_proof/src/main.nr @@ -1,12 +1,13 @@ use dep::std; +#[recursive] fn main( verification_key: [Field; 114], // This is the proof without public inputs attached. // // This means: the size of this does not change with the number of public inputs. proof: [Field; 93], - public_inputs: [Field; 1], + public_inputs: pub [Field; 1], // This is currently not public. It is fine given that the vk is a part of the circuit definition. // I believe we want to eventually make it public too though. 
key_hash: Field, diff --git a/test_programs/execution_success/ecdsa_secp256k1/src/main.nr b/test_programs/execution_success/ecdsa_secp256k1/src/main.nr index 2f410755f74..ac0359e4bb8 100644 --- a/test_programs/execution_success/ecdsa_secp256k1/src/main.nr +++ b/test_programs/execution_success/ecdsa_secp256k1/src/main.nr @@ -1,11 +1,11 @@ use dep::std; fn main( - message: [u8;38], - hashed_message: [u8;32], - pub_key_x: [u8;32], - pub_key_y: [u8;32], - signature: [u8;64] + message: [u8; 38], + hashed_message: [u8; 32], + pub_key_x: [u8; 32], + pub_key_y: [u8; 32], + signature: [u8; 64] ) { // Hash the message, since secp256k1 expects a hashed_message let expected = std::hash::sha256(message); diff --git a/test_programs/execution_success/ecdsa_secp256r1/src/main.nr b/test_programs/execution_success/ecdsa_secp256r1/src/main.nr index d23573d13a6..c64e390d652 100644 --- a/test_programs/execution_success/ecdsa_secp256r1/src/main.nr +++ b/test_programs/execution_success/ecdsa_secp256r1/src/main.nr @@ -1,6 +1,6 @@ use dep::std; -fn main(hashed_message: [u8;32], pub_key_x: [u8;32], pub_key_y: [u8;32], signature: [u8;64]) { +fn main(hashed_message: [u8; 32], pub_key_x: [u8; 32], pub_key_y: [u8; 32], signature: [u8; 64]) { let valid_signature = std::ecdsa_secp256r1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message); assert(valid_signature); } diff --git a/test_programs/execution_success/global_consts/src/baz.nr b/test_programs/execution_success/global_consts/src/baz.nr index 4271de81118..384cf9d3569 100644 --- a/test_programs/execution_success/global_consts/src/baz.nr +++ b/test_programs/execution_success/global_consts/src/baz.nr @@ -1,5 +1,5 @@ pub fn from_baz(x: [Field; crate::foo::MAGIC_NUMBER]) { for i in 0..crate::foo::MAGIC_NUMBER { - assert(x[i] == crate::foo::MAGIC_NUMBER); + assert(x[i] == crate::foo::MAGIC_NUMBER as Field); } } diff --git a/test_programs/execution_success/global_consts/src/foo.nr b/test_programs/execution_success/global_consts/src/foo.nr index 7b0ae75b74b..413b9c3a74b 100644 --- a/test_programs/execution_success/global_consts/src/foo.nr +++ b/test_programs/execution_success/global_consts/src/foo.nr @@ -1,11 +1,11 @@ mod bar; -global N: Field = 5; -global MAGIC_NUMBER: Field = 3; +global N: u64 = 5; +global MAGIC_NUMBER: u64 = 3; global TYPE_INFERRED = 42; pub fn from_foo(x: [Field; bar::N]) { for i in 0..bar::N { - assert(x[i] == bar::N); + assert(x[i] == bar::N as Field); } } diff --git a/test_programs/execution_success/global_consts/src/foo/bar.nr b/test_programs/execution_success/global_consts/src/foo/bar.nr index b8d0b85b0f3..5404c9cf1e3 100644 --- a/test_programs/execution_success/global_consts/src/foo/bar.nr +++ b/test_programs/execution_success/global_consts/src/foo/bar.nr @@ -1,5 +1,5 @@ -global N: Field = 5; +global N: u64 = 5; pub fn from_bar(x: Field) -> Field { - x * N + x * N as Field } diff --git a/test_programs/execution_success/global_consts/src/main.nr b/test_programs/execution_success/global_consts/src/main.nr index 70c7a745a22..3c8ecc67a0c 100644 --- a/test_programs/execution_success/global_consts/src/main.nr +++ b/test_programs/execution_success/global_consts/src/main.nr @@ -3,9 +3,12 @@ mod baz; global M: Field = 32; global L: Field = 10; // Unused globals currently allowed -global N: Field = 5; +global N: u64 = 5; global T_LEN = 2; // Type inference is allowed on globals -//global N: Field = 5; // Uncomment to see duplicate globals error + +// Globals can reference other globals +global DERIVED = M + L; + struct Dummy { x: [Field; N], 
y: [Field; foo::MAGIC_NUMBER] @@ -17,6 +20,13 @@ struct Test { global VALS: [Test; 1] = [Test { v: 100 }]; global NESTED = [VALS, VALS]; +unconstrained fn calculate_global_value() -> Field { + 42 +} + +// Regression test for https://github.com/noir-lang/noir/issues/4318 +global CALCULATED_GLOBAL: Field = calculate_global_value(); + fn main( a: [Field; M + N - N], b: [Field; 30 + N / 2], @@ -26,12 +36,12 @@ fn main( let test_struct = Dummy { x: d, y: c }; for i in 0..foo::MAGIC_NUMBER { - assert(c[i] == foo::MAGIC_NUMBER); - assert(test_struct.y[i] == foo::MAGIC_NUMBER); + assert(c[i] == foo::MAGIC_NUMBER as Field); + assert(test_struct.y[i] == foo::MAGIC_NUMBER as Field); assert(test_struct.y[i] != NESTED[1][0].v); } - assert(N != M); + assert(N as Field != M); let expected: u32 = 42; assert(foo::TYPE_INFERRED == expected); @@ -52,12 +62,12 @@ fn main( arrays_neq(a, b); - let t: [Field; T_LEN] = [N, M]; + let t: [Field; T_LEN] = [N as Field, M]; assert(t[1] == 32); assert(15 == my_submodule::my_helper()); - let add_submodules_N = my_submodule::N + foo::bar::N; + let add_submodules_N = my_submodule::N + foo::bar::N as Field; assert(15 == add_submodules_N); let add_from_bar_N = my_submodule::N + foo::bar::from_bar(1); assert(15 == add_from_bar_N); @@ -65,11 +75,14 @@ fn main( let sugared = [0; my_submodule::N + 2]; assert(sugared[my_submodule::N + 1] == 0); - let arr: [Field; my_submodule::N] = [N; 10]; + let arr: [Field; my_submodule::N] = [N as Field; 10]; assert((arr[0] == 5) & (arr[9] == 5)); foo::from_foo(d); baz::from_baz(c); + assert(DERIVED == M + L); + + assert(CALCULATED_GLOBAL == 42); } fn multiplyByM(x: Field) -> Field { diff --git a/test_programs/execution_success/hashmap/Nargo.toml b/test_programs/execution_success/hashmap/Nargo.toml new file mode 100644 index 00000000000..c09debc9833 --- /dev/null +++ b/test_programs/execution_success/hashmap/Nargo.toml @@ -0,0 +1,6 @@ +[package] +name = "hashmap" +type = "bin" +authors = [""] + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/hashmap/Prover.toml b/test_programs/execution_success/hashmap/Prover.toml new file mode 100644 index 00000000000..84d4c0733e4 --- /dev/null +++ b/test_programs/execution_success/hashmap/Prover.toml @@ -0,0 +1,26 @@ +# Input: 6 key-value entries for hashmap capacity of 8. +# These must be distinct (both key-to-key, and value-to-value) for correct testing. + +[[input]] +key = 2 +value = 17 + +[[input]] +key = 3 +value = 19 + +[[input]] +key = 5 +value = 23 + +[[input]] +key = 7 +value = 29 + +[[input]] +key = 11 +value = 31 + +[[input]] +key = 41 +value = 43 \ No newline at end of file diff --git a/test_programs/execution_success/hashmap/src/main.nr b/test_programs/execution_success/hashmap/src/main.nr new file mode 100644 index 00000000000..597a5c0b7de --- /dev/null +++ b/test_programs/execution_success/hashmap/src/main.nr @@ -0,0 +1,192 @@ +mod utils; + +use dep::std::collections::map::HashMap; +use dep::std::hash::BuildHasherDefault; +use dep::std::hash::pedersen::PedersenHasher; +use dep::std::cmp::Eq; + +use utils::cut; + +type K = Field; +type V = Field; + +// It is more convenient and readable to use structs as input. 
+struct Entry{ + key: Field, + value: Field +} + +global HASHMAP_CAP = 8; +global HASHMAP_LEN = 6; + +global FIELD_CMP = |a: Field, b: Field| a.lt(b); + +global K_CMP = FIELD_CMP; +global V_CMP = FIELD_CMP; +global KV_CMP = |a: (K, V), b: (K, V)| a.0.lt(b.0); + +global ALLOCATE_HASHMAP = || -> HashMap<K, V, HASHMAP_CAP, BuildHasherDefault<PedersenHasher>> + HashMap::default(); + +fn main(input: [Entry; HASHMAP_LEN]) { + test_sequential(input[0].key, input[0].value); + test_multiple_equal_insert(input[1].key, input[1].value); + test_value_override(input[2].key, input[2].value, input[3].value); + test_insert_and_methods(input); + test_hashmaps_equality(input); + test_retain(); + test_iterators(); + test_mut_iterators(); +} + +// Insert, get, remove. +fn test_sequential(key: K, value: V) { + let mut hashmap = ALLOCATE_HASHMAP(); + assert(hashmap.is_empty(), "New HashMap should be empty."); + + hashmap.insert(key, value); + assert(hashmap.len() == 1, "HashMap after one insert should have a length of 1 element."); + + let got = hashmap.get(key); + assert(got.is_some(), "Got none value."); + let got = got.unwrap_unchecked(); + assert(value == got, f"Inserted {value} but got {got} for the same key."); + + hashmap.remove(key); + assert(hashmap.is_empty(), "HashMap after one insert and corresponding removal should be empty."); + let got = hashmap.get(key); + assert(got.is_none(), "Value has been removed, but is still available (not none)."); +} + +// Insert same pair several times. +fn test_multiple_equal_insert(key: K, value: V) { + let mut hashmap = ALLOCATE_HASHMAP(); + assert(hashmap.is_empty(), "New HashMap should be empty."); + + for _ in 0..HASHMAP_LEN { + hashmap.insert(key, value); + } + + let len = hashmap.len(); + assert(len == 1, f"HashMap length must be 1, got {len}."); + + let got = hashmap.get(key); + assert(got.is_some(), "Got none value."); + let got = got.unwrap_unchecked(); + assert(value == got, f"Inserted {value} but got {got} for the same key."); +} + +// Override value for existing pair. +fn test_value_override(key: K, value: V, new_value: V) { + let mut hashmap = ALLOCATE_HASHMAP(); + assert(hashmap.is_empty(), "New hashmap should be empty."); + + hashmap.insert(key, value); + hashmap.insert(key, new_value); + assert(hashmap.len() == 1, "HashMap length is invalid."); + + let got = hashmap.get(key); + assert(got.is_some(), "Got none value."); + let got = got.unwrap_unchecked(); + assert(got == new_value, f"Expected {new_value}, but got {got}."); +} + +// Insert several distinct pairs and test auxiliary methods. +fn test_insert_and_methods(input: [Entry; HASHMAP_LEN]) { + let mut hashmap = ALLOCATE_HASHMAP(); + assert(hashmap.is_empty(), "New HashMap should be empty."); + + for entry in input { + hashmap.insert(entry.key, entry.value); + } + + assert(hashmap.len() == HASHMAP_LEN, "hashmap.len() does not match input length."); + + for entry in input { + assert(hashmap.contains_key(entry.key), f"Not found inserted key {entry.key}."); + } + + hashmap.clear(); + assert(hashmap.is_empty(), "HashMap after clear() should be empty."); +} + +// Insert several pairs and test retaining.
+fn test_retain() { + let mut hashmap = ALLOCATE_HASHMAP(); + assert(hashmap.is_empty(), "New HashMap should be empty."); + + let (key, value) = (5, 11); + hashmap.insert(key, value); + let (key, value) = (2, 13); + hashmap.insert(key, value); + let (key, value) = (11, 5); + hashmap.insert(key, value); + + let predicate = |key: K, value: V| -> bool {key * value == 55}; + hashmap.retain(predicate); + + assert(hashmap.len() == 2, "HashMap should have retained 2 elements."); + assert(hashmap.get(2).is_none(), "Pair should have been removed, since it does not match predicate."); +} + +// Equality trait check. +fn test_hashmaps_equality(input: [Entry; HASHMAP_LEN]) { + let mut hashmap_1 = ALLOCATE_HASHMAP(); + let mut hashmap_2 = ALLOCATE_HASHMAP(); + + for entry in input { + hashmap_1.insert(entry.key, entry.value); + hashmap_2.insert(entry.key, entry.value); + } + + assert(hashmap_1 == hashmap_2, "HashMaps should be equal."); + + hashmap_2.remove(input[0].key); + + assert(hashmap_1 != hashmap_2, "HashMaps should not be equal."); +} + +// Test entries, keys, values. +fn test_iterators() { + let mut hashmap = ALLOCATE_HASHMAP(); + + hashmap.insert(2, 3); + hashmap.insert(5, 7); + hashmap.insert(11, 13); + + let keys: [K; 3] = cut(hashmap.keys()).map(|k: Option<K>| k.unwrap_unchecked()).sort_via(K_CMP); + let values: [V; 3] = cut(hashmap.values()).map(|v: Option<V>| v.unwrap_unchecked()).sort_via(V_CMP); + let entries: [(K, V); 3] = cut(hashmap.entries()).map(|e: Option<(K, V)>| e.unwrap_unchecked()).sort_via(KV_CMP); + + assert(keys == [2, 5, 11], "Got incorrect iteration of keys."); + assert(values == [3, 7, 13], "Got incorrect iteration of values."); + assert(entries == [(2, 3), (5, 7), (11, 13)], "Got incorrect iteration of entries."); +} + +// Test mutable iteration over keys, values and entries. +fn test_mut_iterators() { + let mut hashmap = ALLOCATE_HASHMAP(); + + hashmap.insert(2, 3); + hashmap.insert(5, 7); + hashmap.insert(11, 13); + + let f = |k: K| -> K{ k * 3}; + hashmap.iter_keys_mut(f); + + let f = |v: V| -> V{ v * 5}; + hashmap.iter_values_mut(f); + + let keys: [K; 3] = cut(hashmap.keys()).map(|k: Option<K>| k.unwrap_unchecked()).sort_via(K_CMP); + let values: [V; 3] = cut(hashmap.values()).map(|v: Option<V>| v.unwrap_unchecked()).sort_via(V_CMP); + + assert(keys == [6, 15, 33], f"Got incorrect iteration of keys: {keys}"); + assert(values == [15, 35, 65], "Got incorrect iteration of values."); + + let f = |k: K, v: V| -> (K, V){(k * 2, v * 2)}; + hashmap.iter_mut(f); + + let entries: [(K, V); 3] = cut(hashmap.entries()).map(|e: Option<(K, V)>| e.unwrap_unchecked()).sort_via(KV_CMP); + + assert(entries == [(12, 30), (30, 70), (66, 130)], "Got incorrect iteration of entries."); +} diff --git a/test_programs/execution_success/hashmap/src/utils.nr b/test_programs/execution_success/hashmap/src/utils.nr new file mode 100644 index 00000000000..45c9ca9bbf7 --- /dev/null +++ b/test_programs/execution_success/hashmap/src/utils.nr @@ -0,0 +1,10 @@ +// Compile-time: cuts the M first elements from the [T; N] array.
+pub(crate) fn cut<T, M, N>(input: [T; N]) -> [T; M] { + assert(M as u64 < N as u64, "M should be less than N."); + + let mut new = [dep::std::unsafe::zeroed(); M]; + for i in 0..M { + new[i] = input[i]; + } + new +} diff --git a/test_programs/execution_success/main_bool_arg/src/main.nr b/test_programs/execution_success/main_bool_arg/src/main.nr index 111a23ec0c2..2c50d7dee16 100644 --- a/test_programs/execution_success/main_bool_arg/src/main.nr +++ b/test_programs/execution_success/main_bool_arg/src/main.nr @@ -1,4 +1,4 @@ -fn main(x: bool, y: [bool;2]) { +fn main(x: bool, y: [bool; 2]) { if x { assert(1 != 2); } diff --git a/test_programs/execution_success/missing_closure_env/Nargo.toml b/test_programs/execution_success/missing_closure_env/Nargo.toml new file mode 100644 index 00000000000..284e61b1144 --- /dev/null +++ b/test_programs/execution_success/missing_closure_env/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "missing_closure_env" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/missing_closure_env/Prover.toml b/test_programs/execution_success/missing_closure_env/Prover.toml new file mode 100644 index 00000000000..2d76abaa89f --- /dev/null +++ b/test_programs/execution_success/missing_closure_env/Prover.toml @@ -0,0 +1 @@ +x = 42 diff --git a/test_programs/execution_success/missing_closure_env/src/main.nr b/test_programs/execution_success/missing_closure_env/src/main.nr new file mode 100644 index 00000000000..0bc99b0671c --- /dev/null +++ b/test_programs/execution_success/missing_closure_env/src/main.nr @@ -0,0 +1,16 @@ +fn main(x: Field) { + let x1 = &mut 42; + let set_x1 = |y| { *x1 = y; }; + + assert(*x1 == 42); + set_x1(44); + assert(*x1 == 44); + set_x1(*x1); + assert(*x1 == 44); + assert(x == 42); +} + +#[test] +fn test_main() { + main(42); +} diff --git a/test_programs/execution_success/operator_overloading/src/main.nr b/test_programs/execution_success/operator_overloading/src/main.nr index 3867531abca..d61e1da170e 100644 --- a/test_programs/execution_success/operator_overloading/src/main.nr +++ b/test_programs/execution_success/operator_overloading/src/main.nr @@ -1,4 +1,4 @@ -use dep::std::ops::{ Add, Sub, Mul, Div, Rem, BitAnd, BitOr, BitXor, Shl, Shr }; +use dep::std::ops::{Add, Sub, Mul, Div, Rem, BitAnd, BitOr, BitXor, Shl, Shr}; use dep::std::cmp::Ordering; // x = 3, y = 9 @@ -126,10 +126,6 @@ impl Ord for Wrapper { } } - - - - struct Pair { x: Wrapper, y: Wrapper, diff --git a/test_programs/execution_success/poseidon_bn254_hash/Prover.toml b/test_programs/execution_success/poseidon_bn254_hash/Prover.toml index 8eecf9a3db2..fa6fd05b0a3 100644 --- a/test_programs/execution_success/poseidon_bn254_hash/Prover.toml +++ b/test_programs/execution_success/poseidon_bn254_hash/Prover.toml @@ -2,3 +2,8 @@ x1 = [1,2] y1 = "0x115cc0f5e7d690413df64c6b9662e9cf2a3617f2743245519e19607a4417189a" x2 = [1,2,3,4] y2 = "0x299c867db6c1fdd79dcefa40e4510b9837e60ebb1ce0663dbaa525df65250465" +x3 = ["4218458030232820015255714794613421442512497197372123294583664908262453897094", + "4218458030232820015255714794613421442512497197372123294583664908262453897094", + "4218458030232820015255714794613421442512497197372123294583664908262453897094", + "4218458030232820015255714794613421442512497197372123294583664908262453897094"] + y3 = "0x2f43a0f83b51a6f5fc839dea0ecec74947637802a579fa9841930a25a0bcec11" diff --git a/test_programs/execution_success/poseidon_bn254_hash/src/main.nr 
b/test_programs/execution_success/poseidon_bn254_hash/src/main.nr index e742a440d1c..939b99595c7 100644 --- a/test_programs/execution_success/poseidon_bn254_hash/src/main.nr +++ b/test_programs/execution_success/poseidon_bn254_hash/src/main.nr @@ -1,11 +1,15 @@ // docs:start:poseidon use dep::std::hash::poseidon; +use dep::std::hash::poseidon2; -fn main(x1: [Field; 2], y1: pub Field, x2: [Field; 4], y2: pub Field) { +fn main(x1: [Field; 2], y1: pub Field, x2: [Field; 4], y2: pub Field, x3: [Field; 4], y3: Field) { let hash1 = poseidon::bn254::hash_2(x1); assert(hash1 == y1); let hash2 = poseidon::bn254::hash_4(x2); assert(hash2 == y2); + + let hash3 = poseidon2::Poseidon2::hash(x3, x3.len() as u32); + assert(hash3 == y3); } // docs:end:poseidon diff --git a/test_programs/execution_success/regression/src/main.nr b/test_programs/execution_success/regression/src/main.nr index 08112d4c616..c56f3ef4190 100644 --- a/test_programs/execution_success/regression/src/main.nr +++ b/test_programs/execution_success/regression/src/main.nr @@ -1,29 +1,49 @@ -global NIBBLE_LENGTH: Field = 16; +global NIBBLE_LENGTH: u64 = 16; -fn compact_decode<N>(input: [u8; N], length: Field) -> ([u4; NIBBLE_LENGTH], Field) { - assert(2 * input.len() as u64 <= NIBBLE_LENGTH as u64); - assert(length as u64 <= input.len() as u64); +struct U4 { + inner: u8, +} + +impl U4 { + fn zero() -> U4 { + U4 { inner: 0 } + } + + fn from_u8(x: u8) -> U4 { + U4 { inner: x % 16 } + } +} - let mut nibble = [0 as u4; NIBBLE_LENGTH]; +impl Eq for U4 { + fn eq(self, other: Self) -> bool { + self.inner == other.inner + } +} + +fn compact_decode<N>(input: [u8; N], length: Field) -> ([U4; NIBBLE_LENGTH], Field) { + assert(2 * input.len() <= NIBBLE_LENGTH); + assert(length as u64 <= input.len()); - let first_nibble = (input[0] >> 4) as u4; - let parity = first_nibble as u1; + let mut nibble = [U4::zero(); NIBBLE_LENGTH]; + + let first_nibble = U4::from_u8(input[0] >> 4); + let parity = first_nibble.inner as u1; if parity == 1 { - nibble[0] = (input[0] & 0x0f) as u4; + nibble[0] = U4::from_u8(input[0] & 0x0f); for i in 1..input.len() { if i as u64 < length as u64 { let x = input[i]; - nibble[2*i - 1] = (x >> 4) as u4; - nibble[2*i] = (x & 0x0f) as u4; + nibble[2*i - 1] = U4::from_u8(x >> 4); + nibble[2*i] = U4::from_u8(x & 0x0f); } } } else { for i in 0..2 { if (i as u64) < length as u64 - 1 { let x = input[i + 1]; - nibble[2*i] = (x >> 4) as u4; - nibble[2*i + 1] = (x & 0x0f) as u4; + nibble[2*i] = U4::from_u8(x >> 4); + nibble[2*i + 1] = U4::from_u8(x & 0x0f); } } } @@ -78,7 +98,10 @@ fn main(x: [u8; 5], z: Field) { //Issue 1144 let (nib, len) = compact_decode(x, z); assert(len == 5); - assert([nib[0], nib[1], nib[2], nib[3], nib[4]] == [15, 1, 12, 11, 8]); + assert( + [nib[0], nib[1], nib[2], nib[3], nib[4]] + == [U4::from_u8(15), U4::from_u8(1), U4::from_u8(12), U4::from_u8(11), U4::from_u8(8)] + ); // Issue 1169 let val1 = [ 0xb8, 0x8f, 0x61, 0xe6, 0xfb, 0xda, 0x83, 0xfb, 0xff, 0xfa, 0xbe, 0x36, 0x41, 0x12, 0x13, diff --git a/test_programs/execution_success/regression_2854/src/main.nr b/test_programs/execution_success/regression_2854/src/main.nr deleted file mode 100644 index eccff8225b6..00000000000 --- a/test_programs/execution_success/regression_2854/src/main.nr +++ /dev/null @@ -1,3 +0,0 @@ -fn main(x: Field) -> pub i127 { - x as i127 -} diff --git a/test_programs/execution_success/regression_3394/src/main.nr b/test_programs/execution_success/regression_3394/src/main.nr index cc45487b98b..94b6c818ff2 100644 --- 
a/test_programs/execution_success/regression_3394/src/main.nr +++ b/test_programs/execution_success/regression_3394/src/main.nr @@ -3,4 +3,4 @@ use dep::std; fn main() { let x : i8 = -128; std::println(x); -} \ No newline at end of file +} diff --git a/test_programs/execution_success/regression_3607/src/main.nr b/test_programs/execution_success/regression_3607/src/main.nr index c09211c2810..9c7ef243f60 100644 --- a/test_programs/execution_success/regression_3607/src/main.nr +++ b/test_programs/execution_success/regression_3607/src/main.nr @@ -5,4 +5,4 @@ fn main(mut x: u32) { x = (x+1) / x; } assert(x != 0); -} \ No newline at end of file +} diff --git a/test_programs/execution_success/regression_3889/src/main.nr b/test_programs/execution_success/regression_3889/src/main.nr index 10b8ecabee3..402a69a10da 100644 --- a/test_programs/execution_success/regression_3889/src/main.nr +++ b/test_programs/execution_success/regression_3889/src/main.nr @@ -17,7 +17,6 @@ mod Baz { use crate::Bar::NewType; } - fn main(works: Baz::Works, fails: Baz::BarStruct, also_fails: Bar::NewType) -> pub Field { works.a + fails.a + also_fails.a } diff --git a/test_programs/execution_success/regression_4124/src/main.nr b/test_programs/execution_success/regression_4124/src/main.nr index b47bf28d461..49ff68ee6ad 100644 --- a/test_programs/execution_success/regression_4124/src/main.nr +++ b/test_programs/execution_success/regression_4124/src/main.nr @@ -14,14 +14,14 @@ pub fn storage_read<N>() -> [Field; N] { dep::std::unsafe::zeroed() } -struct PublicState<T> { +struct PublicMutable<T> { storage_slot: Field, } -impl<T> PublicState<T> { +impl<T> PublicMutable<T> { pub fn new(storage_slot: Field) -> Self { assert(storage_slot != 0, "Storage slot 0 not allowed. Storage slots must start from 1."); - PublicState { storage_slot } + PublicMutable { storage_slot } } pub fn read(_self: Self) -> T where T: MyDeserialize { @@ -32,7 +32,7 @@ impl<T> PublicState<T> { } fn main(value: Field) { - let ps: PublicState<Field> = PublicState::new(27); + let ps: PublicMutable<Field> = PublicMutable::new(27); // error here assert(ps.read() == value); diff --git a/test_programs/execution_success/regression_4202/Nargo.toml b/test_programs/execution_success/regression_4202/Nargo.toml new file mode 100644 index 00000000000..acfba12dd4f --- /dev/null +++ b/test_programs/execution_success/regression_4202/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "regression_4202" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/regression_4202/Prover.toml b/test_programs/execution_success/regression_4202/Prover.toml new file mode 100644 index 00000000000..e9319802dfd --- /dev/null +++ b/test_programs/execution_success/regression_4202/Prover.toml @@ -0,0 +1 @@ +input = [1, 2, 3, 4] diff --git a/test_programs/execution_success/regression_4202/src/main.nr b/test_programs/execution_success/regression_4202/src/main.nr new file mode 100644 index 00000000000..37d2ee4578d --- /dev/null +++ b/test_programs/execution_success/regression_4202/src/main.nr @@ -0,0 +1,14 @@ +fn main(input: [u32; 4]) { + let mut slice1: [u32] = [1, 2, 3, 4]; + if slice1[0] == 3 { + slice1[1] = 4; + } + + if slice1[1] == 5 { + slice1[3] = 6; + } + + for i in 0..4 { + assert(slice1[i] == input[i]); + } +} diff --git a/test_programs/execution_success/regression_4436/Nargo.toml b/test_programs/execution_success/regression_4436/Nargo.toml new file mode 100644 index 00000000000..0904d858596 --- /dev/null +++ 
b/test_programs/execution_success/regression_4436/Nargo.toml @@ -0,0 +1,5 @@ +[package] +name = "regression_4436" +type = "bin" +authors = [""] +compiler_version = ">=0.22.0" diff --git a/test_programs/execution_success/regression_4436/src/main.nr b/test_programs/execution_success/regression_4436/src/main.nr new file mode 100644 index 00000000000..834ea3250cc --- /dev/null +++ b/test_programs/execution_success/regression_4436/src/main.nr @@ -0,0 +1,31 @@ +trait LibTrait<N> { + fn broadcast(); + fn get_constant() -> Field; +} + +global STRUCT_A_LEN: Field = 3; +global STRUCT_B_LEN: Field = 5; + +struct StructA; +struct StructB; + +impl LibTrait<STRUCT_A_LEN> for StructA { + fn broadcast() { + Self::get_constant(); + } + + fn get_constant() -> Field { + 1 + } +} +impl LibTrait<STRUCT_B_LEN> for StructB { + fn broadcast() { + Self::get_constant(); + } + + fn get_constant() -> Field { + 1 + } +} + +fn main() {} diff --git a/test_programs/execution_success/side_effects_constrain_array/src/main.nr b/test_programs/execution_success/side_effects_constrain_array/src/main.nr index fb3c346a460..c4a62603bc3 100644 --- a/test_programs/execution_success/side_effects_constrain_array/src/main.nr +++ b/test_programs/execution_success/side_effects_constrain_array/src/main.nr @@ -7,11 +7,11 @@ fn main(y: pub u32) { // The assert inside the if should be hit if y < 10 { - assert(bar.inner == [100, 101, 102]); + assert(bar.inner == [100, 101, 102]); } // The assert inside the if should not be hit if y > 10 { assert(bar.inner == [0, 1, 2]); } -} \ No newline at end of file +} diff --git a/test_programs/execution_success/slice_dynamic_index/src/main.nr b/test_programs/execution_success/slice_dynamic_index/src/main.nr index 374d2ba4c26..41fc9a645c1 100644 --- a/test_programs/execution_success/slice_dynamic_index/src/main.nr +++ b/test_programs/execution_success/slice_dynamic_index/src/main.nr @@ -6,7 +6,7 @@ fn main(x: Field) { fn regression_dynamic_slice_index(x: Field, y: Field) { let mut slice = []; for i in 0..5 { - slice = slice.push_back(i); + slice = slice.push_back(i as Field); } assert(slice.len() == 5); @@ -124,12 +124,12 @@ fn dynamic_slice_merge_if(mut slice: [Field], x: Field) { assert(first_elem == 12); assert(rest_of_slice.len() == 6); - slice = rest_of_slice.insert(x - 2, 20); + slice = rest_of_slice.insert(x as u64 - 2, 20); assert(slice[2] == 20); assert(slice[6] == 30); assert(slice.len() == 7); - let (removed_slice, removed_elem) = slice.remove(x - 1); + let (removed_slice, removed_elem) = slice.remove(x as u64 - 1); // The deconstructed tuple assigns to the slice but is not seen outside of the if statement // without a direct assignment slice = removed_slice; diff --git a/test_programs/execution_success/slices/src/main.nr b/test_programs/execution_success/slices/src/main.nr index c377d2e5b2f..eca42a660c4 100644 --- a/test_programs/execution_success/slices/src/main.nr +++ b/test_programs/execution_success/slices/src/main.nr @@ -167,7 +167,7 @@ fn merge_slices_mutate_in_loop(x: Field, y: Field) -> [Field] { let mut slice = [0; 2]; if x != y { for i in 0..5 { - slice = slice.push_back(i); + slice = slice.push_back(i as Field); } } else { slice = slice.push_back(x); diff --git a/test_programs/execution_success/struct/src/main.nr b/test_programs/execution_success/struct/src/main.nr index 45c5e347e5a..de08f42f79d 100644 --- a/test_programs/execution_success/struct/src/main.nr +++ b/test_programs/execution_success/struct/src/main.nr @@ -9,8 +9,8 @@ struct Pair { } impl Foo { - fn default(x: Field,y: Field) -> Self { - Self { bar:
0, array: [x,y] } + fn default(x: Field, y: Field) -> Self { + Self { bar: 0, array: [x, y] } } } diff --git a/test_programs/execution_success/u128/src/main.nr b/test_programs/execution_success/u128/src/main.nr index 4c734f3a8f9..dc586408795 100644 --- a/test_programs/execution_success/u128/src/main.nr +++ b/test_programs/execution_success/u128/src/main.nr @@ -39,6 +39,6 @@ fn main(mut x: u32, y: u32, z: u32, big_int: U128, hexa: str<7>) { assert(shift >> small_int == small_int); assert(shift >> U128::from_integer(127) == U128::from_integer(0)); assert(shift << U128::from_integer(127) == U128::from_integer(0)); - + assert(U128::from_integer(3).to_integer() == 3); } diff --git a/test_programs/format.sh b/test_programs/format.sh new file mode 100755 index 00000000000..3c679b8689e --- /dev/null +++ b/test_programs/format.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +set -e + +# These tests are incompatible with gas reporting +excluded_dirs=("workspace" "workspace_default_member" "workspace_reexport_bug") + +# These tests cause failures in CI with a stack overflow for some reason. +ci_excluded_dirs=("eddsa") + +current_dir=$(pwd) + +# We generate a Noir workspace which contains all of the test cases +# This allows us to generate a gates report using `nargo info` for all of them at once. + + +function collect_dirs { + test_dirs=$(ls $current_dir/$1) + + for dir in $test_dirs; do + if [[ " ${excluded_dirs[@]} " =~ " ${dir} " ]]; then + continue + fi + + if [[ ${CI-false} = "true" ]] && [[ " ${ci_excluded_dirs[@]} " =~ " ${dir} " ]]; then + continue + fi + + echo " \"$1/$dir\"," >> Nargo.toml +done +} + +echo "[workspace]" > Nargo.toml +echo "members = [" >> Nargo.toml + +collect_dirs compile_success_empty +collect_dirs execution_success + +echo "]" >> Nargo.toml + +if [ "$1" == "check" ]; then + nargo fmt --check +else + nargo fmt +fi + + +rm Nargo.toml diff --git a/test_programs/gates_report.sh b/test_programs/gates_report.sh index 4192c581376..3b0b4d9e148 100755 --- a/test_programs/gates_report.sh +++ b/test_programs/gates_report.sh @@ -2,7 +2,7 @@ set -e # These tests are incompatible with gas reporting -excluded_dirs=("workspace" "workspace_default_member") +excluded_dirs=("workspace" "workspace_default_member" "double_verify_nested_proof") # These tests cause failures in CI with a stack overflow for some reason. ci_excluded_dirs=("eddsa") diff --git a/test_programs/noir_test_success/bounded_vec/src/main.nr b/test_programs/noir_test_success/bounded_vec/src/main.nr index d51d2cc3685..22ec291f9d6 100644 --- a/test_programs/noir_test_success/bounded_vec/src/main.nr +++ b/test_programs/noir_test_success/bounded_vec/src/main.nr @@ -1,6 +1,34 @@ +#[test] +fn test_vec_new_foo() { + foo(); +} + +#[test(should_fail)] +fn test_vec_new_bad() { + bad(); +} + +// docs:start:new_example +fn foo() -> BoundedVec { + // Ok! MaxLen is specified with a type annotation + let v1: BoundedVec = BoundedVec::new(); + let v2 = BoundedVec::new(); + + // Ok! MaxLen is known from the type of foo's return value + v2 +} + +fn bad() { + let mut v3 = BoundedVec::new(); + + // Not Ok! We don't know if v3's MaxLen is at least 1, and the compiler often infers 0 by default. 
+ v3.push(5); +} +// docs:end:new_example + #[test] fn test_vec_push_pop() { - let mut vec: BoundedVec = BoundedVec::new(0); + let mut vec: BoundedVec = BoundedVec::new(); assert(vec.len == 0); vec.push(2); assert(vec.len == 1); @@ -15,24 +43,134 @@ fn test_vec_push_pop() { assert(vec.get(1) == 4); } +#[test] +fn test_vec_get_unchecked() { + let mut vec: BoundedVec<u32, 5> = BoundedVec::new(); + vec.extend_from_array([1, 2, 3, 4]); + let sum = sum_of_first_three(vec); + assert_eq(sum, 6); +} + +// docs:start:get_unchecked_example +fn sum_of_first_three(v: BoundedVec<u32, 5>) -> u32 { + // Always ensure the length is larger than the largest + // index passed to get_unchecked + assert(v.len() > 2); + let first = v.get_unchecked(0); + let second = v.get_unchecked(1); + let third = v.get_unchecked(2); + first + second + third +} +// docs:end:get_unchecked_example + +#[test(should_fail_with = "push out of bounds")] +fn push_docs_example() { + // docs:start:bounded-vec-push-example + let mut v: BoundedVec<Field, 2> = BoundedVec::new(); + + v.push(1); + v.push(2); + + // Panics with failed assertion "push out of bounds" + v.push(3); + // docs:end:bounded-vec-push-example +} + +#[test] +fn pop_docs_example() { + // docs:start:bounded-vec-pop-example + let mut v: BoundedVec<Field, 2> = BoundedVec::new(); + v.push(1); + v.push(2); + + let two = v.pop(); + let one = v.pop(); + + assert(two == 2); + assert(one == 1); + // error: cannot pop from an empty vector + // let _ = v.pop(); + // docs:end:bounded-vec-pop-example +} + +#[test] +fn len_docs_example() { + // docs:start:bounded-vec-len-example + let mut v: BoundedVec<Field, 4> = BoundedVec::new(); + assert(v.len() == 0); + + v.push(100); + assert(v.len() == 1); + + v.push(200); + v.push(300); + v.push(400); + assert(v.len() == 4); + + let _ = v.pop(); + let _ = v.pop(); + assert(v.len() == 2); + // docs:end:bounded-vec-len-example +} + +#[test] +fn max_len_docs_example() { + // docs:start:bounded-vec-max-len-example + let mut v: BoundedVec<Field, 5> = BoundedVec::new(); + + assert(v.max_len() == 5); + v.push(10); + assert(v.max_len() == 5); + // docs:end:bounded-vec-max-len-example +} + +#[test] +fn storage_docs_example() { + // docs:start:bounded-vec-storage-example + let mut v: BoundedVec<Field, 5> = BoundedVec::new(); + + assert(v.storage() == [0, 0, 0, 0, 0]); + + v.push(57); + assert(v.storage() == [57, 0, 0, 0, 0]); + // docs:end:bounded-vec-storage-example +} + #[test] fn test_vec_extend_from_array() { - let mut vec: BoundedVec<Field, 3> = BoundedVec::new(0); + // docs:start:bounded-vec-extend-from-array-example + let mut vec: BoundedVec<Field, 3> = BoundedVec::new(); vec.extend_from_array([2, 4]); + assert(vec.len == 2); assert(vec.get(0) == 2); assert(vec.get(1) == 4); + // docs:end:bounded-vec-extend-from-array-example +} + +#[test] +fn test_vec_extend_from_bounded_vec() { + // docs:start:bounded-vec-extend-from-bounded-vec-example + let mut v1: BoundedVec<Field, 5> = BoundedVec::new(); + let mut v2: BoundedVec<Field, 7> = BoundedVec::new(); + + v2.extend_from_array([1, 2, 3]); + v1.extend_from_bounded_vec(v2); + + assert(v1.storage() == [1, 2, 3, 0, 0]); + assert(v2.storage() == [1, 2, 3, 0, 0, 0, 0]); + // docs:end:bounded-vec-extend-from-bounded-vec-example } #[test(should_fail_with="extend_from_array out of bounds")] fn test_vec_extend_from_array_out_of_bound() { - let mut vec: BoundedVec<Field, 2> = BoundedVec::new(0); + let mut vec: BoundedVec<Field, 2> = BoundedVec::new(); vec.extend_from_array([2, 4, 6]); } #[test(should_fail_with="extend_from_array out of bounds")] fn test_vec_extend_from_array_twice_out_of_bound() { - let mut vec: BoundedVec =
BoundedVec::new(0); + let mut vec: BoundedVec = BoundedVec::new(); vec.extend_from_array([2]); assert(vec.len == 1); vec.extend_from_array([4, 6]); @@ -40,36 +178,36 @@ fn test_vec_extend_from_array_twice_out_of_bound() { #[test(should_fail)] fn test_vec_get_out_of_bound() { - let mut vec: BoundedVec = BoundedVec::new(0); + let mut vec: BoundedVec = BoundedVec::new(); vec.extend_from_array([2, 4]); let _x = vec.get(2); } #[test(should_fail)] fn test_vec_get_not_declared() { - let mut vec: BoundedVec = BoundedVec::new(0); + let mut vec: BoundedVec = BoundedVec::new(); vec.extend_from_array([2]); let _x = vec.get(1); } #[test(should_fail)] fn test_vec_get_uninitialized() { - let mut vec: BoundedVec = BoundedVec::new(0); + let mut vec: BoundedVec = BoundedVec::new(); let _x = vec.get(0); } #[test(should_fail_with="push out of bounds")] fn test_vec_push_out_of_bound() { - let mut vec: BoundedVec = BoundedVec::new(0); + let mut vec: BoundedVec = BoundedVec::new(); vec.push(1); vec.push(2); } #[test(should_fail_with="extend_from_bounded_vec out of bounds")] fn test_vec_extend_from_bounded_vec_out_of_bound() { - let mut vec: BoundedVec = BoundedVec::new(0); + let mut vec: BoundedVec = BoundedVec::new(); - let mut another_vec: BoundedVec = BoundedVec::new(0); + let mut another_vec: BoundedVec = BoundedVec::new(); another_vec.extend_from_array([1, 2, 3]); vec.extend_from_bounded_vec(another_vec); @@ -77,10 +215,10 @@ fn test_vec_extend_from_bounded_vec_out_of_bound() { #[test(should_fail_with="extend_from_bounded_vec out of bounds")] fn test_vec_extend_from_bounded_vec_twice_out_of_bound() { - let mut vec: BoundedVec = BoundedVec::new(0); + let mut vec: BoundedVec = BoundedVec::new(); vec.extend_from_array([1, 2]); - let mut another_vec: BoundedVec = BoundedVec::new(0); + let mut another_vec: BoundedVec = BoundedVec::new(); another_vec.push(3); vec.extend_from_bounded_vec(another_vec); @@ -88,18 +226,20 @@ fn test_vec_extend_from_bounded_vec_twice_out_of_bound() { #[test] fn test_vec_any() { - let mut vec: BoundedVec = BoundedVec::new(0); - vec.extend_from_array([2, 4, 6]); - assert(vec.any(|v| v == 2) == true); - assert(vec.any(|v| v == 4) == true); - assert(vec.any(|v| v == 6) == true); - assert(vec.any(|v| v == 3) == false); + // docs:start:bounded-vec-any-example + let mut v: BoundedVec = BoundedVec::new(); + v.extend_from_array([2, 4, 6]); + + let all_even = !v.any(|elem: u32| elem % 2 != 0); + assert(all_even); + // docs:end:bounded-vec-any-example } #[test] fn test_vec_any_not_default() { - let default_value = 1; - let mut vec: BoundedVec = BoundedVec::new(default_value); + let default_value = 0; + let mut vec: BoundedVec = BoundedVec::new(); vec.extend_from_array([2, 4]); - assert(vec.any(|v| v == default_value) == false); -} \ No newline at end of file + assert(!vec.any(|v| v == default_value)); +} + diff --git a/test_programs/noir_test_success/brillig_overflow_checks/Nargo.toml b/test_programs/noir_test_success/brillig_overflow_checks/Nargo.toml new file mode 100644 index 00000000000..b2d47d258ed --- /dev/null +++ b/test_programs/noir_test_success/brillig_overflow_checks/Nargo.toml @@ -0,0 +1,5 @@ +[package] +name = "brillig_overflow_checks" +type = "bin" +authors = [""] +[dependencies] diff --git a/test_programs/noir_test_success/brillig_overflow_checks/Prover.toml b/test_programs/noir_test_success/brillig_overflow_checks/Prover.toml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/test_programs/noir_test_success/brillig_overflow_checks/src/main.nr 
b/test_programs/noir_test_success/brillig_overflow_checks/src/main.nr new file mode 100644 index 00000000000..5d73ef96d49 --- /dev/null +++ b/test_programs/noir_test_success/brillig_overflow_checks/src/main.nr @@ -0,0 +1,23 @@ +use dep::std::field::bn254::{TWO_POW_128, assert_gt}; + +#[test(should_fail_with = "attempt to add with overflow")] +unconstrained fn test_overflow_add() { + let a: u8 = 255; + let b: u8 = 1; + assert_eq(a + b, 0); +} + +#[test(should_fail_with = "attempt to subtract with overflow")] +unconstrained fn test_overflow_sub() { + let a: u8 = 0; + let b: u8 = 1; + assert_eq(a - b, 255); +} + +#[test(should_fail_with = "attempt to multiply with overflow")] +unconstrained fn test_overflow_mul() { + let a: u8 = 128; + let b: u8 = 2; + assert_eq(a * b, 0); +} + diff --git a/tooling/backend_interface/src/cli/info.rs b/tooling/backend_interface/src/cli/info.rs index 934351dd517..8ca3d4dd0a3 100644 --- a/tooling/backend_interface/src/cli/info.rs +++ b/tooling/backend_interface/src/cli/info.rs @@ -1,4 +1,5 @@ -use acvm::ExpressionWidth; +use acvm::acir::circuit::ExpressionWidth; + use serde::Deserialize; use std::path::{Path, PathBuf}; diff --git a/tooling/backend_interface/src/cli/prove.rs b/tooling/backend_interface/src/cli/prove.rs index c12e516db57..c63d8afab54 100644 --- a/tooling/backend_interface/src/cli/prove.rs +++ b/tooling/backend_interface/src/cli/prove.rs @@ -13,7 +13,6 @@ use super::string_from_stderr; /// The proof will be written to the specified output file. pub(crate) struct ProveCommand { pub(crate) crs_path: PathBuf, - pub(crate) is_recursive: bool, pub(crate) bytecode_path: PathBuf, pub(crate) witness_path: PathBuf, } @@ -33,10 +32,6 @@ impl ProveCommand { .arg("-o") .arg("-"); - if self.is_recursive { - command.arg("-r"); - } - let output = command.output()?; if output.status.success() { Ok(output.stdout) @@ -61,7 +56,7 @@ fn prove_command() -> Result<(), BackendError> { std::fs::File::create(&witness_path).expect("file should be created"); let crs_path = backend.backend_directory(); - let prove_command = ProveCommand { crs_path, bytecode_path, witness_path, is_recursive: false }; + let prove_command = ProveCommand { crs_path, bytecode_path, witness_path }; let proof = prove_command.run(backend.binary_path())?; assert_eq!(proof, "proof".as_bytes()); diff --git a/tooling/backend_interface/src/cli/verify.rs b/tooling/backend_interface/src/cli/verify.rs index a31f476d84c..1a4ba50b7de 100644 --- a/tooling/backend_interface/src/cli/verify.rs +++ b/tooling/backend_interface/src/cli/verify.rs @@ -6,7 +6,6 @@ use crate::BackendError; /// to verify a proof pub(crate) struct VerifyCommand { pub(crate) crs_path: PathBuf, - pub(crate) is_recursive: bool, pub(crate) proof_path: PathBuf, pub(crate) vk_path: PathBuf, } @@ -24,10 +23,6 @@ impl VerifyCommand { .arg("-k") .arg(self.vk_path); - if self.is_recursive { - command.arg("-r"); - } - let output = command.output()?; // We currently do not distinguish between an invalid proof and an error inside the backend. 
@@ -64,18 +59,12 @@ fn verify_command() -> Result<(), BackendError> { write_vk_command.run(backend.binary_path())?; - let prove_command = ProveCommand { - crs_path: crs_path.clone(), - is_recursive: false, - bytecode_path, - witness_path, - }; + let prove_command = ProveCommand { crs_path: crs_path.clone(), bytecode_path, witness_path }; let proof = prove_command.run(backend.binary_path())?; write_to_file(&proof, &proof_path); - let verify_command = - VerifyCommand { crs_path, is_recursive: false, proof_path, vk_path: vk_path_output }; + let verify_command = VerifyCommand { crs_path, proof_path, vk_path: vk_path_output }; let verified = verify_command.run(backend.binary_path())?; assert!(verified); diff --git a/tooling/backend_interface/src/download.rs b/tooling/backend_interface/src/download.rs index 27aab7ef351..60ecb14e642 100644 --- a/tooling/backend_interface/src/download.rs +++ b/tooling/backend_interface/src/download.rs @@ -17,8 +17,12 @@ pub fn download_backend(backend_url: &str, destination_path: &Path) -> std::io:: use tempfile::tempdir; // Download sources - let compressed_file: Cursor> = download_binary_from_url(backend_url) - .map_err(|_| std::io::Error::from(ErrorKind::Other))?; + let compressed_file: Cursor> = download_binary_from_url(backend_url).map_err(|_| { + std::io::Error::new( + ErrorKind::Other, + format!("Could not download backend from install url: {backend_url}"), + ) + })?; // Unpack the tarball let gz_decoder = GzDecoder::new(compressed_file); diff --git a/tooling/backend_interface/src/proof_system.rs b/tooling/backend_interface/src/proof_system.rs index 595cd7e2020..485381006df 100644 --- a/tooling/backend_interface/src/proof_system.rs +++ b/tooling/backend_interface/src/proof_system.rs @@ -2,8 +2,10 @@ use std::fs::File; use std::io::Write; use std::path::Path; -use acvm::acir::{circuit::Circuit, native_types::WitnessMap}; -use acvm::ExpressionWidth; +use acvm::acir::{ + circuit::{Circuit, ExpressionWidth}, + native_types::WitnessMap, +}; use acvm::FieldElement; use tempfile::tempdir; use tracing::warn; @@ -55,7 +57,6 @@ impl Backend { &self, circuit: &Circuit, witness_values: WitnessMap, - is_recursive: bool, ) -> Result, BackendError> { let binary_path = self.assert_binary_exists()?; self.assert_correct_version()?; @@ -76,13 +77,9 @@ impl Backend { write_to_file(&serialized_circuit, &bytecode_path); // Create proof and store it in the specified path - let proof_with_public_inputs = ProveCommand { - crs_path: self.crs_directory(), - is_recursive, - bytecode_path, - witness_path, - } - .run(binary_path)?; + let proof_with_public_inputs = + ProveCommand { crs_path: self.crs_directory(), bytecode_path, witness_path } + .run(binary_path)?; let proof = bb_abstraction_leaks::remove_public_inputs( circuit.public_inputs().0.len(), @@ -97,7 +94,6 @@ impl Backend { proof: &[u8], public_inputs: WitnessMap, circuit: &Circuit, - is_recursive: bool, ) -> Result { let binary_path = self.assert_binary_exists()?; self.assert_correct_version()?; @@ -127,8 +123,7 @@ impl Backend { .run(binary_path)?; // Verify the proof - VerifyCommand { crs_path: self.crs_directory(), is_recursive, proof_path, vk_path } - .run(binary_path) + VerifyCommand { crs_path: self.crs_directory(), proof_path, vk_path }.run(binary_path) } pub fn get_intermediate_proof_artifacts( diff --git a/tooling/backend_interface/src/smart_contract.rs b/tooling/backend_interface/src/smart_contract.rs index 2548079f8e3..5af75e48389 100644 --- a/tooling/backend_interface/src/smart_contract.rs +++ 
b/tooling/backend_interface/src/smart_contract.rs @@ -38,7 +38,7 @@ mod tests { use std::collections::BTreeSet; use acvm::acir::{ - circuit::{Circuit, Opcode, PublicInputs}, + circuit::{Circuit, ExpressionWidth, Opcode, PublicInputs}, native_types::{Expression, Witness}, }; @@ -51,11 +51,13 @@ mod tests { let circuit = Circuit { current_witness_index: 4, + expression_width: ExpressionWidth::Bounded { width: 3 }, opcodes: vec![constraint], private_parameters: BTreeSet::from([Witness(1), Witness(2)]), public_parameters: PublicInputs::default(), return_values: PublicInputs::default(), assert_messages: Default::default(), + recursive: false, }; let contract = get_mock_backend()?.eth_contract(&circuit)?; diff --git a/tooling/bb_abstraction_leaks/build.rs b/tooling/bb_abstraction_leaks/build.rs index 6197f52cb4b..f9effd5d991 100644 --- a/tooling/bb_abstraction_leaks/build.rs +++ b/tooling/bb_abstraction_leaks/build.rs @@ -10,7 +10,7 @@ use const_format::formatcp; const USERNAME: &str = "AztecProtocol"; const REPO: &str = "aztec-packages"; -const VERSION: &str = "0.21.0"; +const VERSION: &str = "0.24.0"; const TAG: &str = formatcp!("aztec-packages-v{}", VERSION); const API_URL: &str = diff --git a/tooling/debugger/Cargo.toml b/tooling/debugger/Cargo.toml index 4d240c61f90..30d11db8cf3 100644 --- a/tooling/debugger/Cargo.toml +++ b/tooling/debugger/Cargo.toml @@ -11,11 +11,12 @@ build-data.workspace = true [dependencies] acvm.workspace = true +fm.workspace = true nargo.workspace = true +noirc_frontend.workspace = true noirc_printable_type.workspace = true noirc_errors.workspace = true noirc_driver.workspace = true -fm.workspace = true thiserror.workspace = true codespan-reporting.workspace = true dap.workspace = true @@ -26,5 +27,4 @@ serde_json.workspace = true [dev-dependencies] assert_cmd = "2.0.12" rexpect = "0.5.0" -test-binary = "3.0.1" tempfile.workspace = true diff --git a/tooling/debugger/build.rs b/tooling/debugger/build.rs index cedeebcae86..26a8bc64b0e 100644 --- a/tooling/debugger/build.rs +++ b/tooling/debugger/build.rs @@ -1,3 +1,4 @@ +use std::collections::HashSet; use std::fs::File; use std::io::Write; use std::path::{Path, PathBuf}; @@ -6,9 +7,6 @@ use std::{env, fs}; const GIT_COMMIT: &&str = &"GIT_COMMIT"; fn main() { - // Rebuild if the tests have changed - println!("cargo:rerun-if-changed=tests"); - // Only use build_data if the environment variable isn't set // The environment variable is always set when working via Nix if std::env::var(GIT_COMMIT).is_err() { @@ -29,6 +27,11 @@ fn main() { }; let test_dir = root_dir.join("test_programs"); + // Rebuild if the tests have changed + println!("cargo:rerun-if-changed=tests"); + println!("cargo:rerun-if-changed=ignored-tests.txt"); + println!("cargo:rerun-if-changed={}", test_dir.as_os_str().to_str().unwrap()); + generate_debugger_tests(&mut test_file, &test_dir); } @@ -38,10 +41,13 @@ fn generate_debugger_tests(test_file: &mut File, test_data_dir: &Path) { let test_case_dirs = fs::read_dir(test_data_dir).unwrap().flatten().filter(|c| c.path().is_dir()); + let ignored_tests_contents = fs::read_to_string("ignored-tests.txt").unwrap(); + let ignored_tests = ignored_tests_contents.lines().collect::>(); for test_dir in test_case_dirs { let test_name = test_dir.file_name().into_string().expect("Directory can't be converted to string"); + let ignored = ignored_tests.contains(test_name.as_str()); if test_name.contains('-') { panic!( "Invalid test directory: {test_name}. 
Cannot include `-`, please convert to `_`" @@ -53,11 +59,13 @@ fn generate_debugger_tests(test_file: &mut File, test_data_dir: &Path) { test_file, r#" #[test] +{ignored} fn debug_{test_name}() {{ debugger_execution_success("{test_dir}"); }} "#, test_dir = test_dir.display(), + ignored = if ignored { "#[ignore]" } else { "" }, ) .expect("Could not write templated test file."); } diff --git a/tooling/debugger/ignored-tests.txt b/tooling/debugger/ignored-tests.txt new file mode 100644 index 00000000000..c472e828739 --- /dev/null +++ b/tooling/debugger/ignored-tests.txt @@ -0,0 +1,21 @@ +array_dynamic_blackbox_input +array_sort +assign_ex +bit_shifts_comptime +brillig_cow +brillig_nested_arrays +brillig_references +brillig_to_bytes_integration +debug_logs +double_verify_nested_proof +double_verify_proof +modulus +nested_array_dynamic +nested_array_in_slice +nested_arrays_from_brillig +references +scalar_mul +signed_comparison +simple_2d_array +to_bytes_integration +bigint diff --git a/tooling/debugger/src/context.rs b/tooling/debugger/src/context.rs index 12b55708b15..515edf0bb06 100644 --- a/tooling/debugger/src/context.rs +++ b/tooling/debugger/src/context.rs @@ -1,6 +1,8 @@ +use crate::foreign_calls::DebugForeignCallExecutor; use acvm::acir::circuit::{Circuit, Opcode, OpcodeLocation}; use acvm::acir::native_types::{Witness, WitnessMap}; -use acvm::brillig_vm::{brillig::Value, Registers}; +use acvm::brillig_vm::brillig::ForeignCallResult; +use acvm::brillig_vm::brillig::Value; use acvm::pwg::{ ACVMStatus, BrilligSolver, BrilligSolverStatus, ForeignCallWaitInfo, StepResult, ACVM, }; @@ -8,8 +10,8 @@ use acvm::{BlackBoxFunctionSolver, FieldElement}; use nargo::artifacts::debug::DebugArtifact; use nargo::errors::{ExecutionError, Location}; -use nargo::ops::ForeignCallExecutor; use nargo::NargoError; +use noirc_printable_type::{PrintableType, PrintableValue}; use std::collections::{hash_set::Iter, HashSet}; @@ -24,7 +26,7 @@ pub(super) enum DebugCommandResult { pub(super) struct DebugContext<'a, B: BlackBoxFunctionSolver> { acvm: ACVM<'a, B>, brillig_solver: Option>, - foreign_call_executor: Box, + foreign_call_executor: Box, debug_artifact: &'a DebugArtifact, breakpoints: HashSet, } @@ -35,7 +37,7 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { circuit: &'a Circuit, debug_artifact: &'a DebugArtifact, initial_witness: WitnessMap, - foreign_call_executor: Box, + foreign_call_executor: Box, ) -> Self { Self { acvm: ACVM::new(blackbox_solver, &circuit.opcodes, initial_witness), @@ -76,15 +78,82 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { } } + pub(super) fn get_call_stack(&self) -> Vec { + let instruction_pointer = self.acvm.instruction_pointer(); + if instruction_pointer >= self.get_opcodes().len() { + vec![] + } else if let Some(ref solver) = self.brillig_solver { + solver + .get_call_stack() + .iter() + .map(|program_counter| OpcodeLocation::Brillig { + acir_index: instruction_pointer, + brillig_index: *program_counter, + }) + .collect() + } else { + vec![OpcodeLocation::Acir(instruction_pointer)] + } + } + + pub(super) fn is_source_location_in_debug_module(&self, location: &Location) -> bool { + self.debug_artifact + .file_map + .get(&location.file) + .map(|file| file.path.starts_with("__debug/")) + .unwrap_or(false) + } + /// Returns the callstack in source code locations for the currently /// executing opcode. 
This can be `None` if the execution finished (and /// `get_current_opcode_location()` returns `None`) or if the opcode is not /// mapped to a specific source location in the debug artifact (which can - /// happen for certain opcodes inserted synthetically by the compiler) + /// happen for certain opcodes inserted synthetically by the compiler). + /// This function also filters source locations that are determined to be in + /// the internal debug module. pub(super) fn get_current_source_location(&self) -> Option> { self.get_current_opcode_location() .as_ref() - .and_then(|location| self.debug_artifact.debug_symbols[0].opcode_location(location)) + .map(|opcode_location| self.get_source_location_for_opcode_location(opcode_location)) + .filter(|v: &Vec| !v.is_empty()) + } + + /// Returns the (possible) stack of source locations corresponding to the + /// given opcode location. Due to compiler inlining it's possible for this + /// function to return multiple source locations. An empty vector means that + /// the given opcode location cannot be mapped back to a source location + /// (eg. it may be pure debug instrumentation code or other synthetically + /// produced opcode by the compiler) + pub(super) fn get_source_location_for_opcode_location( + &self, + opcode_location: &OpcodeLocation, + ) -> Vec { + self.debug_artifact.debug_symbols[0] + .opcode_location(opcode_location) + .map(|source_locations| { + source_locations + .into_iter() + .filter(|source_location| { + !self.is_source_location_in_debug_module(source_location) + }) + .collect() + }) + .unwrap_or_default() + } + + /// Returns the current call stack with expanded source locations. In + /// general, the matching between opcode location and source location is 1 + /// to 1, but due to the compiler inlining functions a single opcode + /// location may expand to multiple source locations. + pub(super) fn get_source_call_stack(&self) -> Vec<(OpcodeLocation, Location)> { + self.get_call_stack() + .iter() + .flat_map(|opcode_location| { + self.get_source_location_for_opcode_location(opcode_location) + .into_iter() + .map(|source_location| (*opcode_location, source_location)) + }) + .collect() } fn get_opcodes_sizes(&self) -> Vec { @@ -224,6 +293,9 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { let foreign_call_result = self.foreign_call_executor.execute(&foreign_call); match foreign_call_result { Ok(foreign_call_result) => { + let foreign_call_result = foreign_call_result + .get_brillig_output() + .unwrap_or(ForeignCallResult::default()); if let Some(mut solver) = self.brillig_solver.take() { solver.resolve_pending_foreign_call(foreign_call_result); self.brillig_solver = Some(solver); @@ -323,7 +395,8 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { } } - pub(super) fn next(&mut self) -> DebugCommandResult { + /// Steps debugging execution until the next source location + pub(super) fn next_into(&mut self) -> DebugCommandResult { let start_location = self.get_current_source_location(); loop { let result = self.step_into_opcode(); @@ -337,6 +410,38 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { } } + /// Steps debugging execution until the next source location at the same (or + /// less) call stack depth (eg. 
don't dive into function calls) + pub(super) fn next_over(&mut self) -> DebugCommandResult { + let start_call_stack = self.get_source_call_stack(); + loop { + let result = self.next_into(); + if !matches!(result, DebugCommandResult::Ok) { + return result; + } + let new_call_stack = self.get_source_call_stack(); + if new_call_stack.len() <= start_call_stack.len() { + return DebugCommandResult::Ok; + } + } + } + + /// Steps debugging execution until the next source location with a smaller + /// call stack depth (eg. returning from the current function) + pub(super) fn next_out(&mut self) -> DebugCommandResult { + let start_call_stack = self.get_source_call_stack(); + loop { + let result = self.next_into(); + if !matches!(result, DebugCommandResult::Ok) { + return result; + } + let new_call_stack = self.get_source_call_stack(); + if new_call_stack.len() < start_call_stack.len() { + return DebugCommandResult::Ok; + } + } + } + pub(super) fn cont(&mut self) -> DebugCommandResult { loop { let result = self.step_into_opcode(); @@ -352,16 +457,6 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { acir_index < opcodes.len() && matches!(opcodes[acir_index], Opcode::Brillig(..)) } - pub(super) fn get_brillig_registers(&self) -> Option<&Registers> { - self.brillig_solver.as_ref().map(|solver| solver.get_registers()) - } - - pub(super) fn set_brillig_register(&mut self, register_index: usize, value: FieldElement) { - if let Some(solver) = self.brillig_solver.as_mut() { - solver.set_register(register_index, value.into()); - } - } - pub(super) fn get_brillig_memory(&self) -> Option<&[Value]> { self.brillig_solver.as_ref().map(|solver| solver.get_memory()) } @@ -372,6 +467,10 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { } } + pub(super) fn get_variables(&self) -> Vec<(&str, &PrintableValue, &PrintableType)> { + return self.foreign_call_executor.get_variables(); + } + fn breakpoint_reached(&self) -> bool { if let Some(location) = self.get_current_opcode_location() { self.breakpoints.contains(&location) @@ -432,6 +531,7 @@ mod tests { use super::*; use crate::context::{DebugCommandResult, DebugContext}; + use crate::foreign_calls::DefaultDebugForeignCallExecutor; use acvm::{ acir::{ circuit::{ @@ -442,10 +542,10 @@ mod tests { }, blackbox_solver::StubbedBlackBoxSolver, brillig_vm::brillig::{ - BinaryFieldOp, Opcode as BrilligOpcode, RegisterIndex, RegisterOrMemory, + BinaryFieldOp, HeapValueType, MemoryAddress, Opcode as BrilligOpcode, ValueOrArray, }, }; - use nargo::{artifacts::debug::DebugArtifact, ops::DefaultForeignCallExecutor}; + use nargo::artifacts::debug::DebugArtifact; use std::collections::BTreeMap; #[test] @@ -461,16 +561,24 @@ mod tests { })], outputs: vec![], bytecode: vec![ + BrilligOpcode::CalldataCopy { + destination_address: MemoryAddress(0), + size: 1, + offset: 0, + }, BrilligOpcode::Const { - destination: RegisterIndex::from(1), + destination: MemoryAddress::from(1), value: Value::from(fe_0), + bit_size: 32, }, BrilligOpcode::ForeignCall { function: "clear_mock".into(), destinations: vec![], - inputs: vec![RegisterOrMemory::RegisterIndex(RegisterIndex::from(0))], + destination_value_types: vec![], + inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::from(0))], + input_value_types: vec![HeapValueType::Simple], }, - BrilligOpcode::Stop, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 }, ], predicate: None, }; @@ -485,17 +593,19 @@ mod tests { let initial_witness = BTreeMap::from([(Witness(1), fe_1)]).into(); + let foreign_call_executor = + 
Box::new(DefaultDebugForeignCallExecutor::from_artifact(true, debug_artifact)); let mut context = DebugContext::new( &StubbedBlackBoxSolver, circuit, debug_artifact, initial_witness, - Box::new(DefaultForeignCallExecutor::new(true, None)), + foreign_call_executor, ); assert_eq!(context.get_current_opcode_location(), Some(OpcodeLocation::Acir(0))); - // execute the first Brillig opcode (const) + // Execute the first Brillig opcode (calldata copy) let result = context.step_into_opcode(); assert!(matches!(result, DebugCommandResult::Ok)); assert_eq!( @@ -503,15 +613,15 @@ mod tests { Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 1 }) ); - // try to execute the second Brillig opcode (and resolve the foreign call) + // execute the second Brillig opcode (const) let result = context.step_into_opcode(); assert!(matches!(result, DebugCommandResult::Ok)); assert_eq!( context.get_current_opcode_location(), - Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 1 }) + Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 2 }) ); - // retry the second Brillig opcode (foreign call should be finished) + // try to execute the third Brillig opcode (and resolve the foreign call) let result = context.step_into_opcode(); assert!(matches!(result, DebugCommandResult::Ok)); assert_eq!( @@ -519,6 +629,14 @@ mod tests { Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 2 }) ); + // retry the third Brillig opcode (foreign call should be finished) + let result = context.step_into_opcode(); + assert!(matches!(result, DebugCommandResult::Ok)); + assert_eq!( + context.get_current_opcode_location(), + Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 3 }) + ); + // last Brillig opcode let result = context.step_into_opcode(); assert!(matches!(result, DebugCommandResult::Done)); @@ -547,13 +665,18 @@ mod tests { ], outputs: vec![BrilligOutputs::Simple(w_z)], bytecode: vec![ + BrilligOpcode::CalldataCopy { + destination_address: MemoryAddress(0), + size: 2, + offset: 0, + }, BrilligOpcode::BinaryFieldOp { - destination: RegisterIndex::from(0), + destination: MemoryAddress::from(0), op: BinaryFieldOp::Add, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), }, - BrilligOpcode::Stop, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 1 }, ], predicate: None, }; @@ -577,12 +700,14 @@ mod tests { let initial_witness = BTreeMap::from([(Witness(1), fe_1), (Witness(2), fe_1)]).into(); + let foreign_call_executor = + Box::new(DefaultDebugForeignCallExecutor::from_artifact(true, debug_artifact)); let mut context = DebugContext::new( &StubbedBlackBoxSolver, circuit, debug_artifact, initial_witness, - Box::new(DefaultForeignCallExecutor::new(true, None)), + foreign_call_executor, ); // set breakpoint @@ -611,14 +736,22 @@ mod tests { Opcode::Brillig(Brillig { inputs: vec![], outputs: vec![], - bytecode: vec![BrilligOpcode::Stop, BrilligOpcode::Stop, BrilligOpcode::Stop], + bytecode: vec![ + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 }, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 }, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 }, + ], predicate: None, }), Opcode::MemoryInit { block_id: BlockId(0), init: vec![] }, Opcode::Brillig(Brillig { inputs: vec![], outputs: vec![], - bytecode: vec![BrilligOpcode::Stop, BrilligOpcode::Stop, BrilligOpcode::Stop], + bytecode: vec![ + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 
}, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 }, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 }, + ], predicate: None, }), Opcode::AssertZero(Expression::default()), @@ -631,7 +764,7 @@ mod tests { &circuit, &debug_artifact, WitnessMap::new(), - Box::new(DefaultForeignCallExecutor::new(true, None)), + Box::new(DefaultDebugForeignCallExecutor::new(true)), ); assert_eq!(context.offset_opcode_location(&None, 0), (None, 0)); diff --git a/tooling/debugger/src/dap.rs b/tooling/debugger/src/dap.rs index 803f9f108db..7e67a26b257 100644 --- a/tooling/debugger/src/dap.rs +++ b/tooling/debugger/src/dap.rs @@ -9,6 +9,7 @@ use codespan_reporting::files::{Files, SimpleFile}; use crate::context::DebugCommandResult; use crate::context::DebugContext; +use crate::foreign_calls::DefaultDebugForeignCallExecutor; use dap::errors::ServerError; use dap::events::StoppedEventBody; @@ -17,15 +18,14 @@ use dap::requests::{Command, Request, SetBreakpointsArguments}; use dap::responses::{ ContinueResponse, DisassembleResponse, ResponseBody, ScopesResponse, SetBreakpointsResponse, SetExceptionBreakpointsResponse, SetInstructionBreakpointsResponse, StackTraceResponse, - ThreadsResponse, + ThreadsResponse, VariablesResponse, }; use dap::server::Server; use dap::types::{ - Breakpoint, DisassembledInstruction, Source, StackFrame, SteppingGranularity, - StoppedEventReason, Thread, + Breakpoint, DisassembledInstruction, Scope, Source, StackFrame, SteppingGranularity, + StoppedEventReason, Thread, Variable, }; use nargo::artifacts::debug::DebugArtifact; -use nargo::ops::DefaultForeignCallExecutor; use fm::FileId; use noirc_driver::CompiledProgram; @@ -41,6 +41,22 @@ pub struct DapSession<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> { source_breakpoints: BTreeMap>, } +enum ScopeReferences { + Locals = 1, + WitnessMap = 2, + InvalidScope = 0, +} + +impl From for ScopeReferences { + fn from(value: i64) -> Self { + match value { + 1 => Self::Locals, + 2 => Self::WitnessMap, + _ => Self::InvalidScope, + } + } +} + // BTreeMap impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { @@ -57,7 +73,7 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { circuit, debug_artifact, initial_witness, - Box::new(DefaultForeignCallExecutor::new(true, None)), + Box::new(DefaultDebugForeignCallExecutor::from_artifact(true, debug_artifact)), ); Self { server, @@ -99,7 +115,8 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { let source_location = source_locations[0]; let span = source_location.span; let file_id = source_location.file; - let Ok(line_index) = &simple_files[&file_id].line_index((), span.start() as usize) else { + let Ok(line_index) = &simple_files[&file_id].line_index((), span.start() as usize) + else { return; }; let line_number = line_index + 1; @@ -125,14 +142,14 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { } pub fn run_loop(&mut self) -> Result<(), ServerError> { - self.running = true; + self.running = self.context.get_current_opcode_location().is_some(); - if matches!(self.context.get_current_source_location(), None) { + if self.running && self.context.get_current_source_location().is_none() { // TODO: remove this? This is to ensure that the tool has a proper // source location to show when first starting the debugger, but // maybe the default behavior should be to start executing until the // first breakpoint set. 
- _ = self.context.next(); + _ = self.context.next_into(); } self.server.send_event(Event::Initialized)?; @@ -176,7 +193,7 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { args.granularity.as_ref().unwrap_or(&SteppingGranularity::Statement); match granularity { SteppingGranularity::Instruction => self.handle_step(req)?, - _ => self.handle_next(req)?, + _ => self.handle_next_into(req)?, } } Command::StepOut(ref args) => { @@ -184,7 +201,7 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { args.granularity.as_ref().unwrap_or(&SteppingGranularity::Statement); match granularity { SteppingGranularity::Instruction => self.handle_step(req)?, - _ => self.handle_next(req)?, + _ => self.handle_next_out(req)?, } } Command::Next(ref args) => { @@ -192,18 +209,17 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { args.granularity.as_ref().unwrap_or(&SteppingGranularity::Statement); match granularity { SteppingGranularity::Instruction => self.handle_step(req)?, - _ => self.handle_next(req)?, + _ => self.handle_next_over(req)?, } } Command::Continue(_) => { self.handle_continue(req)?; } Command::Scopes(_) => { - // FIXME: this needs a proper implementation when we can - // show the parameters and variables - self.server.respond( - req.success(ResponseBody::Scopes(ScopesResponse { scopes: vec![] })), - )?; + self.handle_scopes(req)?; + } + Command::Variables(ref _args) => { + self.handle_variables(req)?; } _ => { eprintln!("ERROR: unhandled command: {:?}", req.command); @@ -213,37 +229,38 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { Ok(()) } + fn build_stack_trace(&self) -> Vec { + self.context + .get_source_call_stack() + .iter() + .enumerate() + .map(|(index, (opcode_location, source_location))| { + let line_number = + self.debug_artifact.location_line_number(*source_location).unwrap(); + let column_number = + self.debug_artifact.location_column_number(*source_location).unwrap(); + StackFrame { + id: index as i64, + name: format!("frame #{index}"), + source: Some(Source { + path: self.debug_artifact.file_map[&source_location.file] + .path + .to_str() + .map(String::from), + ..Source::default() + }), + line: line_number as i64, + column: column_number as i64, + instruction_pointer_reference: Some(opcode_location.to_string()), + ..StackFrame::default() + } + }) + .rev() + .collect() + } + fn handle_stack_trace(&mut self, req: Request) -> Result<(), ServerError> { - let opcode_location = self.context.get_current_opcode_location(); - let source_location = self.context.get_current_source_location(); - let frames = match source_location { - None => vec![], - Some(locations) => locations - .iter() - .enumerate() - .map(|(index, location)| { - let line_number = self.debug_artifact.location_line_number(*location).unwrap(); - let column_number = - self.debug_artifact.location_column_number(*location).unwrap(); - let ip_reference = opcode_location.map(|location| location.to_string()); - StackFrame { - id: index as i64, - name: format!("frame #{index}"), - source: Some(Source { - path: self.debug_artifact.file_map[&location.file] - .path - .to_str() - .map(String::from), - ..Source::default() - }), - line: line_number as i64, - column: column_number as i64, - instruction_pointer_reference: ip_reference, - ..StackFrame::default() - } - }) - .collect(), - }; + let frames = self.build_stack_trace(); let total_frames = Some(frames.len() as i64); 
self.server.respond(req.success(ResponseBody::StackTrace(StackTraceResponse { stack_frames: frames, @@ -281,7 +298,7 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { } } // the actual opcodes - while count > 0 && !matches!(opcode_location, None) { + while count > 0 && opcode_location.is_some() { instructions.push(DisassembledInstruction { address: format!("{}", opcode_location.unwrap()), instruction: self.context.render_opcode_at_location(&opcode_location), @@ -315,9 +332,23 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { self.handle_execution_result(result) } - fn handle_next(&mut self, req: Request) -> Result<(), ServerError> { - let result = self.context.next(); - eprintln!("INFO: stepped by statement with result {result:?}"); + fn handle_next_into(&mut self, req: Request) -> Result<(), ServerError> { + let result = self.context.next_into(); + eprintln!("INFO: stepped into by statement with result {result:?}"); + self.server.respond(req.ack()?)?; + self.handle_execution_result(result) + } + + fn handle_next_out(&mut self, req: Request) -> Result<(), ServerError> { + let result = self.context.next_out(); + eprintln!("INFO: stepped out by statement with result {result:?}"); + self.server.respond(req.ack()?)?; + self.handle_execution_result(result) + } + + fn handle_next_over(&mut self, req: Request) -> Result<(), ServerError> { + let result = self.context.next_over(); + eprintln!("INFO: stepped over by statement with result {result:?}"); self.server.respond(req.ack()?)?; self.handle_execution_result(result) } @@ -416,29 +447,31 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { // compute breakpoints to set and return let mut breakpoints_to_set: Vec<(OpcodeLocation, i64)> = vec![]; - let breakpoints: Vec = args.breakpoints.iter().map(|breakpoint| { - let Ok(location) = OpcodeLocation::from_str(breakpoint.instruction_reference.as_str()) else { - return Breakpoint { - verified: false, - message: Some(String::from("Missing instruction reference")), - ..Breakpoint::default() - }; - }; - if !self.context.is_valid_opcode_location(&location) { - return Breakpoint { - verified: false, - message: Some(String::from("Invalid opcode location")), - ..Breakpoint::default() + let breakpoints: Vec = args + .breakpoints + .iter() + .map(|breakpoint| { + let Ok(location) = + OpcodeLocation::from_str(breakpoint.instruction_reference.as_str()) + else { + return Breakpoint { + verified: false, + message: Some(String::from("Missing instruction reference")), + ..Breakpoint::default() + }; }; - } - let id = self.get_next_breakpoint_id(); - breakpoints_to_set.push((location, id)); - Breakpoint { - id: Some(id), - verified: true, - ..Breakpoint::default() - } - }).collect(); + if !self.context.is_valid_opcode_location(&location) { + return Breakpoint { + verified: false, + message: Some(String::from("Invalid opcode location")), + ..Breakpoint::default() + }; + } + let id = self.get_next_breakpoint_id(); + breakpoints_to_set.push((location, id)); + Breakpoint { id: Some(id), verified: true, ..Breakpoint::default() } + }) + .collect(); // actually set the computed breakpoints self.instruction_breakpoints = breakpoints_to_set; @@ -480,7 +513,12 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { }; let found_index = match line_to_opcodes.binary_search_by(|x| x.0.cmp(&line)) { Ok(index) => line_to_opcodes[index].1, - Err(index) => line_to_opcodes[index].1, + Err(index) => { + if index 
>= line_to_opcodes.len() { + return None; + } + line_to_opcodes[index].1 + } }; Some(found_index) } @@ -504,7 +542,9 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { let Some(location) = self.find_opcode_for_source_location(&file_id, line) else { return Breakpoint { verified: false, - message: Some(String::from("Source location cannot be matched to opcode location")), + message: Some(String::from( + "Source location cannot be matched to opcode location", + )), ..Breakpoint::default() }; }; @@ -548,6 +588,73 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { )?; Ok(()) } + + fn handle_scopes(&mut self, req: Request) -> Result<(), ServerError> { + self.server.respond(req.success(ResponseBody::Scopes(ScopesResponse { + scopes: vec![ + Scope { + name: String::from("Locals"), + variables_reference: ScopeReferences::Locals as i64, + ..Scope::default() + }, + Scope { + name: String::from("Witness Map"), + variables_reference: ScopeReferences::WitnessMap as i64, + ..Scope::default() + }, + ], + })))?; + Ok(()) + } + + fn build_local_variables(&self) -> Vec { + let mut variables: Vec<_> = self + .context + .get_variables() + .iter() + .map(|(name, value, _var_type)| Variable { + name: String::from(*name), + value: format!("{:?}", *value), + ..Variable::default() + }) + .collect(); + variables.sort_by(|a, b| a.name.partial_cmp(&b.name).unwrap()); + variables + } + + fn build_witness_map(&self) -> Vec { + self.context + .get_witness_map() + .clone() + .into_iter() + .map(|(witness, value)| Variable { + name: format!("_{}", witness.witness_index()), + value: format!("{value:?}"), + ..Variable::default() + }) + .collect() + } + + fn handle_variables(&mut self, req: Request) -> Result<(), ServerError> { + let Command::Variables(ref args) = req.command else { + unreachable!("handle_variables called on a different request"); + }; + let scope: ScopeReferences = args.variables_reference.into(); + let variables: Vec<_> = match scope { + ScopeReferences::Locals => self.build_local_variables(), + ScopeReferences::WitnessMap => self.build_witness_map(), + _ => { + eprintln!( + "handle_variables with an unknown variables_reference {}", + args.variables_reference + ); + vec![] + } + }; + self.server + .respond(req.success(ResponseBody::Variables(VariablesResponse { variables })))?; + Ok(()) + } } pub fn run_session( diff --git a/tooling/debugger/src/errors.rs b/tooling/debugger/src/errors.rs new file mode 100644 index 00000000000..4578987d715 --- /dev/null +++ b/tooling/debugger/src/errors.rs @@ -0,0 +1,19 @@ +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum DapError { + #[error("{0}")] + PreFlightGenericError(String), + + #[error(transparent)] + LoadError(#[from] LoadError), + + #[error(transparent)] + ServerError(#[from] dap::errors::ServerError), +} + +#[derive(Debug, Error)] +pub enum LoadError { + #[error("{0}")] + Generic(String), +} diff --git a/tooling/debugger/src/foreign_calls.rs b/tooling/debugger/src/foreign_calls.rs new file mode 100644 index 00000000000..68c4d3947b0 --- /dev/null +++ b/tooling/debugger/src/foreign_calls.rs @@ -0,0 +1,142 @@ +use acvm::{ + acir::brillig::{ForeignCallParam, ForeignCallResult, Value}, + pwg::ForeignCallWaitInfo, +}; +use nargo::{ + artifacts::debug::{DebugArtifact, DebugVars}, + ops::{DefaultForeignCallExecutor, ForeignCallExecutor, NargoForeignCallResult}, +}; +use noirc_errors::debug_info::DebugVarId; +use noirc_printable_type::{ForeignCallError, PrintableType, PrintableValue}; + 
+pub(crate) enum DebugForeignCall { + VarAssign, + VarDrop, + MemberAssign(u32), + DerefAssign, +} + +impl DebugForeignCall { + pub(crate) fn lookup(op_name: &str) -> Option { + let member_pre = "__debug_member_assign_"; + if let Some(op_suffix) = op_name.strip_prefix(member_pre) { + let arity = + op_suffix.parse::().expect("failed to parse debug_member_assign arity"); + return Some(DebugForeignCall::MemberAssign(arity)); + } + match op_name { + "__debug_var_assign" => Some(DebugForeignCall::VarAssign), + "__debug_var_drop" => Some(DebugForeignCall::VarDrop), + "__debug_deref_assign" => Some(DebugForeignCall::DerefAssign), + _ => None, + } + } +} + +pub trait DebugForeignCallExecutor: ForeignCallExecutor { + fn get_variables(&self) -> Vec<(&str, &PrintableValue, &PrintableType)>; +} + +pub struct DefaultDebugForeignCallExecutor { + executor: DefaultForeignCallExecutor, + pub debug_vars: DebugVars, +} + +impl DefaultDebugForeignCallExecutor { + pub fn new(show_output: bool) -> Self { + Self { + executor: DefaultForeignCallExecutor::new(show_output, None), + debug_vars: DebugVars::default(), + } + } + + pub fn from_artifact(show_output: bool, artifact: &DebugArtifact) -> Self { + let mut ex = Self::new(show_output); + ex.load_artifact(artifact); + ex + } + + pub fn load_artifact(&mut self, artifact: &DebugArtifact) { + artifact.debug_symbols.iter().for_each(|info| { + self.debug_vars.insert_variables(&info.variables); + self.debug_vars.insert_types(&info.types); + }); + } +} + +impl DebugForeignCallExecutor for DefaultDebugForeignCallExecutor { + fn get_variables(&self) -> Vec<(&str, &PrintableValue, &PrintableType)> { + self.debug_vars.get_variables() + } +} + +fn debug_var_id(value: &Value) -> DebugVarId { + DebugVarId(value.to_u128() as u32) +} + +impl ForeignCallExecutor for DefaultDebugForeignCallExecutor { + fn execute( + &mut self, + foreign_call: &ForeignCallWaitInfo, + ) -> Result { + let foreign_call_name = foreign_call.function.as_str(); + match DebugForeignCall::lookup(foreign_call_name) { + Some(DebugForeignCall::VarAssign) => { + let fcp_var_id = &foreign_call.inputs[0]; + if let ForeignCallParam::Single(var_id_value) = fcp_var_id { + let var_id = debug_var_id(var_id_value); + let values: Vec = + foreign_call.inputs[1..].iter().flat_map(|x| x.values()).collect(); + self.debug_vars.assign_var(var_id, &values); + } + Ok(ForeignCallResult::default().into()) + } + Some(DebugForeignCall::VarDrop) => { + let fcp_var_id = &foreign_call.inputs[0]; + if let ForeignCallParam::Single(var_id_value) = fcp_var_id { + let var_id = debug_var_id(var_id_value); + self.debug_vars.drop_var(var_id); + } + Ok(ForeignCallResult::default().into()) + } + Some(DebugForeignCall::MemberAssign(arity)) => { + if let Some(ForeignCallParam::Single(var_id_value)) = foreign_call.inputs.first() { + let arity = arity as usize; + let var_id = debug_var_id(var_id_value); + let n = foreign_call.inputs.len(); + let indexes: Vec = foreign_call.inputs[(n - arity)..n] + .iter() + .map(|fcp_v| { + if let ForeignCallParam::Single(v) = fcp_v { + v.to_u128() as u32 + } else { + panic!("expected ForeignCallParam::Single(v)"); + } + }) + .collect(); + let values: Vec = (0..n - 1 - arity) + .flat_map(|i| { + foreign_call + .inputs + .get(1 + i) + .map(|fci| fci.values()) + .unwrap_or_default() + }) + .collect(); + self.debug_vars.assign_field(var_id, indexes, &values); + } + Ok(ForeignCallResult::default().into()) + } + Some(DebugForeignCall::DerefAssign) => { + let fcp_var_id = &foreign_call.inputs[0]; + let fcp_value = 
&foreign_call.inputs[1]; + if let ForeignCallParam::Single(var_id_value) = fcp_var_id { + let var_id = debug_var_id(var_id_value); + self.debug_vars.assign_deref(var_id, &fcp_value.values()); + } + Ok(ForeignCallResult::default().into()) + } + None => self.executor.execute(foreign_call), + } + } +} diff --git a/tooling/debugger/src/lib.rs b/tooling/debugger/src/lib.rs index 21834e44f93..4a25e3417a0 100644 --- a/tooling/debugger/src/lib.rs +++ b/tooling/debugger/src/lib.rs @@ -1,5 +1,7 @@ mod context; mod dap; +pub mod errors; +mod foreign_calls; mod repl; mod source_code_printer; diff --git a/tooling/debugger/src/repl.rs b/tooling/debugger/src/repl.rs index 40ee6efdb86..8441dbde9be 100644 --- a/tooling/debugger/src/repl.rs +++ b/tooling/debugger/src/repl.rs @@ -4,9 +4,11 @@ use acvm::acir::circuit::{Circuit, Opcode, OpcodeLocation}; use acvm::acir::native_types::{Witness, WitnessMap}; use acvm::{BlackBoxFunctionSolver, FieldElement}; -use nargo::{artifacts::debug::DebugArtifact, ops::DefaultForeignCallExecutor, NargoError}; +use crate::foreign_calls::DefaultDebugForeignCallExecutor; +use nargo::{artifacts::debug::DebugArtifact, NargoError}; use easy_repl::{command, CommandStatus, Repl}; +use noirc_printable_type::PrintableValueDisplay; use std::cell::RefCell; use crate::source_code_printer::print_source_code_location; @@ -27,21 +29,22 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { debug_artifact: &'a DebugArtifact, initial_witness: WitnessMap, ) -> Self { + let foreign_call_executor = + Box::new(DefaultDebugForeignCallExecutor::from_artifact(true, debug_artifact)); let context = DebugContext::new( blackbox_solver, circuit, debug_artifact, initial_witness.clone(), - Box::new(DefaultForeignCallExecutor::new(true, None)), + foreign_call_executor, ); - Self { - context, - blackbox_solver, - circuit, - debug_artifact, - initial_witness, - last_result: DebugCommandResult::Ok, - } + let last_result = if context.get_current_opcode_location().is_none() { + // handle circuit with no opcodes + DebugCommandResult::Done + } else { + DebugCommandResult::Ok + }; + Self { context, blackbox_solver, circuit, debug_artifact, initial_witness, last_result } } pub fn show_current_vm_status(&self) { @@ -73,9 +76,44 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { ); } } + let locations = self.context.get_source_location_for_opcode_location(&location); + print_source_code_location(self.debug_artifact, &locations); + } + } + } - print_source_code_location(self.debug_artifact, &location); + fn show_stack_frame(&self, index: usize, location: &OpcodeLocation) { + let opcodes = self.context.get_opcodes(); + match location { + OpcodeLocation::Acir(instruction_pointer) => { + println!( + "Frame #{index}, opcode {}: {}", + instruction_pointer, opcodes[*instruction_pointer] + ) } + OpcodeLocation::Brillig { acir_index, brillig_index } => { + let Opcode::Brillig(ref brillig) = opcodes[*acir_index] else { + unreachable!("Brillig location does not contain a Brillig block"); + }; + println!( + "Frame #{index}, opcode {}.{}: {:?}", + acir_index, brillig_index, brillig.bytecode[*brillig_index] + ); + } + } + let locations = self.context.get_source_location_for_opcode_location(location); + print_source_code_location(self.debug_artifact, &locations); + } + + pub fn show_current_call_stack(&self) { + let call_stack = self.context.get_call_stack(); + if call_stack.is_empty() { + println!("Finished execution. 
Call stack empty."); + return; + } + + for (i, frame_location) in call_stack.iter().enumerate() { + self.show_stack_frame(i, frame_location); } } @@ -193,9 +231,23 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { } } - fn next(&mut self) { + fn next_into(&mut self) { + if self.validate_in_progress() { + let result = self.context.next_into(); + self.handle_debug_command_result(result); + } + } + + fn next_over(&mut self) { if self.validate_in_progress() { - let result = self.context.next(); + let result = self.context.next_over(); + self.handle_debug_command_result(result); + } + } + + fn next_out(&mut self) { + if self.validate_in_progress() { + let result = self.context.next_out(); self.handle_debug_command_result(result); } } @@ -211,12 +263,14 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { fn restart_session(&mut self) { let breakpoints: Vec = self.context.iterate_breakpoints().copied().collect(); + let foreign_call_executor = + Box::new(DefaultDebugForeignCallExecutor::from_artifact(true, self.debug_artifact)); self.context = DebugContext::new( self.blackbox_solver, self.circuit, self.debug_artifact, self.initial_witness.clone(), - Box::new(DefaultForeignCallExecutor::new(true, None)), + foreign_call_executor, ); for opcode_location in breakpoints { self.context.add_breakpoint(opcode_location); @@ -251,37 +305,6 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { println!("_{} = {value}", index); } - pub fn show_brillig_registers(&self) { - if !self.context.is_executing_brillig() { - println!("Not executing a Brillig block"); - return; - } - - let Some(registers) = self.context.get_brillig_registers() else { - // this can happen when just entering the Brillig block since ACVM - // would have not initialized the Brillig VM yet; in fact, the - // Brillig code may be skipped altogether - println!("Brillig VM registers not available"); - return; - }; - - for (index, value) in registers.inner.iter().enumerate() { - println!("{index} = {}", value.to_field()); - } - } - - pub fn set_brillig_register(&mut self, index: usize, value: String) { - let Some(field_value) = FieldElement::try_from_str(&value) else { - println!("Invalid value: {value}"); - return; - }; - if !self.context.is_executing_brillig() { - println!("Not executing a Brillig block"); - return; - } - self.context.set_brillig_register(index, field_value); - } - pub fn show_brillig_memory(&self) { if !self.context.is_executing_brillig() { println!("Not executing a Brillig block"); @@ -313,6 +336,15 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { self.context.write_brillig_memory(index, field_value); } + pub fn show_vars(&self) { + let vars = self.context.get_variables(); + for (var_name, value, var_type) in vars.iter() { + let printable_value = + PrintableValueDisplay::Plain((*value).clone(), (*var_type).clone()); + println!("{var_name}:{var_type:?} = {}", printable_value); + } + } + fn is_solved(&self) -> bool { self.context.is_solved() } @@ -360,11 +392,31 @@ pub fn run( command! { "step until a new source location is reached", () => || { - ref_context.borrow_mut().next(); + ref_context.borrow_mut().next_into(); Ok(CommandStatus::Done) } }, ) + .add( + "over", + command! { + "step until a new source location is reached without diving into function calls", + () => || { + ref_context.borrow_mut().next_over(); + Ok(CommandStatus::Done) + } + } + ) + .add( + "out", + command! 
{ + "step until a new source location is reached and the current stack frame is finished", + () => || { + ref_context.borrow_mut().next_out(); + Ok(CommandStatus::Done) + } + } + ) .add( "continue", command! { @@ -446,41 +498,41 @@ pub fn run( }, ) .add( - "registers", + "memory", command! { - "show Brillig registers (valid when executing a Brillig block)", + "show Brillig memory (valid when executing a Brillig block)", () => || { - ref_context.borrow().show_brillig_registers(); + ref_context.borrow().show_brillig_memory(); Ok(CommandStatus::Done) } }, ) .add( - "regset", + "memset", command! { - "update a Brillig register with the given value", + "update a Brillig memory cell with the given value", (index: usize, value: String) => |index, value| { - ref_context.borrow_mut().set_brillig_register(index, value); + ref_context.borrow_mut().write_brillig_memory(index, value); Ok(CommandStatus::Done) } }, ) .add( - "memory", + "stacktrace", command! { - "show Brillig memory (valid when executing a Brillig block)", + "display the current stack trace", () => || { - ref_context.borrow().show_brillig_memory(); + ref_context.borrow().show_current_call_stack(); Ok(CommandStatus::Done) } }, ) .add( - "memset", + "vars", command! { - "update a Brillig memory cell with the given value", - (index: usize, value: String) => |index, value| { - ref_context.borrow_mut().write_brillig_memory(index, value); + "show variable values available at this point in execution", + () => || { + ref_context.borrow_mut().show_vars(); Ok(CommandStatus::Done) } }, diff --git a/tooling/debugger/src/source_code_printer.rs b/tooling/debugger/src/source_code_printer.rs index 1707f9066b7..b5ffdb12d01 100644 --- a/tooling/debugger/src/source_code_printer.rs +++ b/tooling/debugger/src/source_code_printer.rs @@ -1,4 +1,3 @@ -use acvm::acir::circuit::OpcodeLocation; use codespan_reporting::files::Files; use nargo::artifacts::debug::DebugArtifact; use noirc_errors::Location; @@ -31,13 +30,7 @@ struct LocationPrintContext { // Given a DebugArtifact and an OpcodeLocation, prints all the source code // locations the OpcodeLocation maps to, with some surrounding context and // visual aids to highlight the location itself. 
-pub(crate) fn print_source_code_location( - debug_artifact: &DebugArtifact, - location: &OpcodeLocation, -) { - let locations = debug_artifact.debug_symbols[0].opcode_location(location); - let Some(locations) = locations else { return; }; - +pub(crate) fn print_source_code_location(debug_artifact: &DebugArtifact, locations: &[Location]) { let locations = locations.iter(); for loc in locations { @@ -276,7 +269,8 @@ mod tests { let mut opcode_locations = BTreeMap::>::new(); opcode_locations.insert(OpcodeLocation::Acir(42), vec![loc]); - let debug_symbols = vec![DebugInfo::new(opcode_locations)]; + let debug_symbols = + vec![DebugInfo::new(opcode_locations, BTreeMap::default(), BTreeMap::default())]; let debug_artifact = DebugArtifact::new(debug_symbols, &fm); let location_rendered: Vec<_> = render_location(&debug_artifact, &loc).collect(); diff --git a/tooling/debugger/tests/debug.rs b/tooling/debugger/tests/debug.rs index e8b17b8a7af..4cb678192b8 100644 --- a/tooling/debugger/tests/debug.rs +++ b/tooling/debugger/tests/debug.rs @@ -5,8 +5,6 @@ mod tests { use rexpect::spawn_bash; - test_binary::build_test_binary_once!(mock_backend, "../backend_interface/test-binaries"); - // include tests generated by `build.rs` include!(concat!(env!("OUT_DIR"), "/debug.rs")); @@ -14,20 +12,20 @@ mod tests { let nargo_bin = cargo_bin("nargo").into_os_string().into_string().expect("Cannot parse nargo path"); - let mock_backend_path = - path_to_mock_backend().into_string().expect("Cannot parse mock_backend path"); - let mut dbg_session = spawn_bash(Some(10000)).expect("Could not start bash session"); + // Set backend to `/dev/null` to force an error if nargo tries to speak to a backend. dbg_session - .send_line(&format!("export NARGO_BACKEND_PATH={}", mock_backend_path)) + .send_line("export NARGO_BACKEND_PATH=/dev/null") .expect("Could not export NARGO_BACKEND_PATH."); dbg_session.wait_for_prompt().expect("Could not export NARGO_BACKEND_PATH."); // Start debugger and test that it loads for the given program. 
dbg_session .execute( - &format!("{} debug --program-dir {}", nargo_bin, test_program_dir), + &format!( + "{nargo_bin} debug --program-dir {test_program_dir} --force-brillig --expression-width 3" + ), ".*\\Starting debugger.*", ) .expect("Could not start debugger"); diff --git a/tooling/lsp/src/lib.rs b/tooling/lsp/src/lib.rs index a0e024c70fd..be9b83e02f6 100644 --- a/tooling/lsp/src/lib.rs +++ b/tooling/lsp/src/lib.rs @@ -222,11 +222,15 @@ pub(crate) fn resolve_workspace_for_source_path(file_path: &Path) -> Result ParsedFiles { cache_misses .into_iter() .map(|(id, _, _, parse_results)| (id, parse_results)) - .chain(cache_hits.into_iter()) + .chain(cache_hits) .collect() } else { parse_all(file_manager) diff --git a/tooling/lsp/src/requests/profile_run.rs b/tooling/lsp/src/requests/profile_run.rs index d866be8988b..89719947689 100644 --- a/tooling/lsp/src/requests/profile_run.rs +++ b/tooling/lsp/src/requests/profile_run.rs @@ -3,9 +3,12 @@ use std::{ future::{self, Future}, }; -use acvm::ExpressionWidth; +use acvm::acir::circuit::ExpressionWidth; use async_lsp::{ErrorCode, ResponseError}; -use nargo::{artifacts::debug::DebugArtifact, insert_all_files_for_workspace_into_file_manager}; +use nargo::{ + artifacts::debug::DebugArtifact, insert_all_files_for_workspace_into_file_manager, + ops::report_errors, +}; use nargo_toml::{find_package_manifest, resolve_workspace_from_toml, PackageSelection}; use noirc_driver::{ file_manager_with_stdlib, CompileOptions, DebugFile, NOIR_ARTIFACT_VERSION_STRING, @@ -60,11 +63,18 @@ fn on_profile_run_request_inner( Some(_package) => { let expression_width = ExpressionWidth::Bounded { width: 3 }; - let (compiled_programs, compiled_contracts) = nargo::ops::compile_workspace( + let compiled_workspace = nargo::ops::compile_workspace( &workspace_file_manager, &parsed_files, &workspace, &CompileOptions::default(), + ); + + let (compiled_programs, compiled_contracts) = report_errors( + compiled_workspace, + &workspace_file_manager, + CompileOptions::default().deny_warnings, + CompileOptions::default().silence_warnings, ) .map_err(|err| ResponseError::new(ErrorCode::REQUEST_FAILED, err))?; diff --git a/tooling/lsp/src/requests/test_run.rs b/tooling/lsp/src/requests/test_run.rs index 135090d7ed9..0b88d814265 100644 --- a/tooling/lsp/src/requests/test_run.rs +++ b/tooling/lsp/src/requests/test_run.rs @@ -83,7 +83,7 @@ fn on_test_run_request_inner( let test_result = run_test( &state.solver, - &context, + &mut context, test_function, false, None, diff --git a/tooling/lsp/src/solver.rs b/tooling/lsp/src/solver.rs index f001cebaa4d..d0acbf1aec5 100644 --- a/tooling/lsp/src/solver.rs +++ b/tooling/lsp/src/solver.rs @@ -49,4 +49,12 @@ impl BlackBoxFunctionSolver for WrapperSolver { ) -> Result<(acvm::FieldElement, acvm::FieldElement), acvm::BlackBoxResolutionError> { self.0.ec_add(input1_x, input1_y, input2_x, input2_y) } + + fn poseidon2_permutation( + &self, + inputs: &[acvm::FieldElement], + len: u32, + ) -> Result, acvm::BlackBoxResolutionError> { + self.0.poseidon2_permutation(inputs, len) + } } diff --git a/tooling/nargo/Cargo.toml b/tooling/nargo/Cargo.toml index cd97980b9e0..efd38a182e0 100644 --- a/tooling/nargo/Cargo.toml +++ b/tooling/nargo/Cargo.toml @@ -36,4 +36,3 @@ jsonrpc-http-server = "18.0" jsonrpc-core-client = "18.0" jsonrpc-derive = "18.0" jsonrpc-core = "18.0" -serial_test = "2.0" diff --git a/tooling/nargo/build.rs b/tooling/nargo/build.rs index 4fa7f58892a..ab2b7579132 100644 --- a/tooling/nargo/build.rs +++ b/tooling/nargo/build.rs @@ -2,8 +2,8 @@ 
 use rustc_version::{version, Version};
 
 fn check_rustc_version() {
     assert!(
-        version().unwrap() >= Version::parse("1.71.1").unwrap(),
-        "The minimal supported rustc version is 1.71.1."
+        version().unwrap() >= Version::parse("1.73.0").unwrap(),
+        "The minimal supported rustc version is 1.73.0."
     );
 }
diff --git a/tooling/nargo/src/artifacts/debug.rs b/tooling/nargo/src/artifacts/debug.rs
index 2e2d98f279e..a249ecb03ad 100644
--- a/tooling/nargo/src/artifacts/debug.rs
+++ b/tooling/nargo/src/artifacts/debug.rs
@@ -8,6 +8,7 @@ use std::{
     ops::Range,
 };
 
+pub use super::debug_vars::DebugVars;
 use fm::{FileId, FileManager, PathString};
 
 /// A Debug Artifact stores, for a given program, the debug info for every function
@@ -86,7 +87,8 @@ impl DebugArtifact {
         let line_index = self.line_index(location.file, location_start)?;
         let line_span = self.line_range(location.file, line_index)?;
 
-        let line_length = line_span.end - (line_span.start + 1);
+        let line_length =
+            if line_span.end > line_span.start { line_span.end - (line_span.start + 1) } else { 0 };
         let start_in_line = location_start - line_span.start;
 
         // The location might continue beyond the line,
@@ -229,7 +231,8 @@ mod tests {
         let mut opcode_locations = BTreeMap::<OpcodeLocation, Vec<Location>>::new();
         opcode_locations.insert(OpcodeLocation::Acir(42), vec![loc]);
 
-        let debug_symbols = vec![DebugInfo::new(opcode_locations)];
+        let debug_symbols =
+            vec![DebugInfo::new(opcode_locations, BTreeMap::default(), BTreeMap::default())];
         let debug_artifact = DebugArtifact::new(debug_symbols, &fm);
 
         let location_in_line = debug_artifact.location_in_line(loc).expect("Expected a range");
diff --git a/tooling/nargo/src/artifacts/debug_vars.rs b/tooling/nargo/src/artifacts/debug_vars.rs
new file mode 100644
index 00000000000..20f2637f7d6
--- /dev/null
+++ b/tooling/nargo/src/artifacts/debug_vars.rs
@@ -0,0 +1,119 @@
+use acvm::brillig_vm::brillig::Value;
+use noirc_errors::debug_info::{
+    DebugTypeId, DebugTypes, DebugVarId, DebugVariable, DebugVariables,
+};
+use noirc_printable_type::{decode_value, PrintableType, PrintableValue};
+use std::collections::{HashMap, HashSet};
+
+#[derive(Debug, Default, Clone)]
+pub struct DebugVars {
+    variables: HashMap<DebugVarId, DebugVariable>,
+    types: HashMap<DebugTypeId, PrintableType>,
+    active: HashSet<DebugVarId>,
+    values: HashMap<DebugVarId, PrintableValue>,
+}
+
+impl DebugVars {
+    pub fn get_variables(&self) -> Vec<(&str, &PrintableValue, &PrintableType)> {
+        self.active
+            .iter()
+            .filter_map(|var_id| {
+                self.variables.get(var_id).and_then(|debug_var| {
+                    let Some(value) = self.values.get(var_id) else {
+                        return None;
+                    };
+                    let Some(ptype) = self.types.get(&debug_var.debug_type_id) else {
+                        return None;
+                    };
+                    Some((debug_var.name.as_str(), value, ptype))
+                })
+            })
+            .collect()
+    }
+
+    pub fn insert_variables(&mut self, vars: &DebugVariables) {
+        self.variables.extend(vars.clone());
+    }
+
+    pub fn insert_types(&mut self, types: &DebugTypes) {
+        self.types.extend(types.clone());
+    }
+
+    pub fn assign_var(&mut self, var_id: DebugVarId, values: &[Value]) {
+        self.active.insert(var_id);
+        let type_id = &self.variables.get(&var_id).unwrap().debug_type_id;
+        let ptype = self.types.get(type_id).unwrap();
+        self.values.insert(var_id, decode_value(&mut values.iter().map(|v| v.to_field()), ptype));
+    }
+
+    pub fn assign_field(&mut self, var_id: DebugVarId, indexes: Vec<u32>, values: &[Value]) {
+        let mut cursor: &mut PrintableValue = self
+            .values
+            .get_mut(&var_id)
+            .unwrap_or_else(|| panic!("value unavailable for var_id {var_id:?}"));
+        let cursor_type_id = &self
+            .variables
+            .get(&var_id)
+            .unwrap_or_else(|| panic!("variable {var_id:?}
not found")) + .debug_type_id; + let mut cursor_type = self + .types + .get(cursor_type_id) + .unwrap_or_else(|| panic!("type unavailable for type id {cursor_type_id:?}")); + for index in indexes.iter() { + (cursor, cursor_type) = match (cursor, cursor_type) { + (PrintableValue::Vec(array), PrintableType::Array { length, typ }) => { + if let Some(len) = length { + if *index as u64 >= *len { + panic!("unexpected field index past array length") + } + if *len != array.len() as u64 { + panic!("type/array length mismatch") + } + } + (array.get_mut(*index as usize).unwrap(), &*Box::leak(typ.clone())) + } + ( + PrintableValue::Struct(field_map), + PrintableType::Struct { name: _name, fields }, + ) => { + if *index as usize >= fields.len() { + panic!("unexpected field index past struct field length") + } + let (key, typ) = fields.get(*index as usize).unwrap(); + (field_map.get_mut(key).unwrap(), typ) + } + (PrintableValue::Vec(array), PrintableType::Tuple { types }) => { + if *index >= types.len() as u32 { + panic!( + "unexpected field index ({index}) past tuple length ({})", + types.len() + ); + } + if types.len() != array.len() { + panic!("type/array length mismatch") + } + let typ = types.get(*index as usize).unwrap(); + (array.get_mut(*index as usize).unwrap(), typ) + } + _ => { + panic!("unexpected assign field of {cursor_type:?} type"); + } + }; + } + *cursor = decode_value(&mut values.iter().map(|v| v.to_field()), cursor_type); + self.active.insert(var_id); + } + + pub fn assign_deref(&mut self, _var_id: DebugVarId, _values: &[Value]) { + unimplemented![] + } + + pub fn get_type(&self, var_id: DebugVarId) -> Option<&PrintableType> { + self.variables.get(&var_id).and_then(|debug_var| self.types.get(&debug_var.debug_type_id)) + } + + pub fn drop_var(&mut self, var_id: DebugVarId) { + self.active.remove(&var_id); + } +} diff --git a/tooling/nargo/src/artifacts/mod.rs b/tooling/nargo/src/artifacts/mod.rs index 180a900fd81..c7b3736f90b 100644 --- a/tooling/nargo/src/artifacts/mod.rs +++ b/tooling/nargo/src/artifacts/mod.rs @@ -5,4 +5,5 @@ //! to generate them using these artifacts as a starting point. 
pub mod contract; pub mod debug; +mod debug_vars; pub mod program; diff --git a/tooling/nargo/src/lib.rs b/tooling/nargo/src/lib.rs index 0fdff8b202f..3deced041f8 100644 --- a/tooling/nargo/src/lib.rs +++ b/tooling/nargo/src/lib.rs @@ -16,7 +16,7 @@ pub mod workspace; use std::collections::BTreeMap; -use fm::FileManager; +use fm::{FileManager, FILE_EXTENSION}; use noirc_driver::{add_dep, prepare_crate, prepare_dependency}; use noirc_frontend::{ graph::{CrateId, CrateName}, @@ -65,12 +65,11 @@ fn insert_all_files_for_package_into_file_manager( let entry_path_parent = package .entry_path .parent() - .unwrap_or_else(|| panic!("The entry path is expected to be a single file within a directory and so should have a parent {:?}", package.entry_path)) - .clone(); + .unwrap_or_else(|| panic!("The entry path is expected to be a single file within a directory and so should have a parent {:?}", package.entry_path)); // Get all files in the package and add them to the file manager - let paths = - get_all_paths_in_dir(entry_path_parent).expect("could not get all paths in the package"); + let paths = get_all_noir_source_in_dir(entry_path_parent) + .expect("could not get all paths in the package"); for path in paths { let source = std::fs::read_to_string(path.as_path()) .unwrap_or_else(|_| panic!("could not read file {:?} into string", path)); @@ -125,6 +124,15 @@ pub fn prepare_package<'file_manager, 'parsed_files>( (context, crate_id) } +// Get all Noir source files in the directory and subdirectories. +// +// Panics: If the path is not a path to a directory. +fn get_all_noir_source_in_dir(dir: &std::path::Path) -> std::io::Result> { + get_all_paths_in_dir(dir, |path| { + path.extension().map_or(false, |extension| extension == FILE_EXTENSION) + }) +} + // Get all paths in the directory and subdirectories. // // Panics: If the path is not a path to a directory. @@ -132,7 +140,10 @@ pub fn prepare_package<'file_manager, 'parsed_files>( // TODO: Along with prepare_package, this function is an abstraction leak // TODO: given that this crate should not know about the file manager. // TODO: We can clean this up in a future refactor -fn get_all_paths_in_dir(dir: &std::path::Path) -> std::io::Result> { +fn get_all_paths_in_dir( + dir: &std::path::Path, + predicate: fn(&std::path::Path) -> bool, +) -> std::io::Result> { assert!(dir.is_dir(), "directory {dir:?} is not a path to a directory"); let mut paths = Vec::new(); @@ -142,9 +153,9 @@ fn get_all_paths_in_dir(dir: &std::path::Path) -> std::io::Result Result<(Vec, Vec), CompileError> { +) -> CompilationResult<(Vec, Vec)> { let (binary_packages, contract_packages): (Vec<_>, Vec<_>) = workspace .into_iter() .filter(|package| !package.is_library()) @@ -35,31 +38,20 @@ pub fn compile_workspace( .map(|package| compile_contract(file_manager, parsed_files, package, compile_options)) .collect(); - // Report any warnings/errors which were encountered during compilation. 
- let compiled_programs: Vec = program_results - .into_iter() - .map(|compilation_result| { - report_errors( - compilation_result, - file_manager, - compile_options.deny_warnings, - compile_options.silence_warnings, - ) - }) - .collect::>()?; - let compiled_contracts: Vec = contract_results - .into_iter() - .map(|compilation_result| { - report_errors( - compilation_result, - file_manager, - compile_options.deny_warnings, - compile_options.silence_warnings, - ) - }) - .collect::>()?; - - Ok((compiled_programs, compiled_contracts)) + // Collate any warnings/errors which were encountered during compilation. + let compiled_programs = collect_errors(program_results); + let compiled_contracts = collect_errors(contract_results); + + match (compiled_programs, compiled_contracts) { + (Ok((programs, program_warnings)), Ok((contracts, contract_warnings))) => { + let warnings = [program_warnings, contract_warnings].concat(); + Ok(((programs, contracts), warnings)) + } + (Err(program_errors), Err(contract_errors)) => { + Err([program_errors, contract_errors].concat()) + } + (Err(errors), _) | (_, Err(errors)) => Err(errors), + } } pub fn compile_program( @@ -68,8 +60,29 @@ pub fn compile_program( package: &Package, compile_options: &CompileOptions, cached_program: Option, +) -> CompilationResult { + compile_program_with_debug_instrumenter( + file_manager, + parsed_files, + package, + compile_options, + cached_program, + DebugInstrumenter::default(), + ) +} + +pub fn compile_program_with_debug_instrumenter( + file_manager: &FileManager, + parsed_files: &ParsedFiles, + package: &Package, + compile_options: &CompileOptions, + cached_program: Option, + debug_instrumenter: DebugInstrumenter, ) -> CompilationResult { let (mut context, crate_id) = prepare_package(file_manager, parsed_files, package); + link_to_debug_crate(&mut context, crate_id); + context.debug_instrumenter = debug_instrumenter; + noirc_driver::compile_main(&mut context, crate_id, compile_options, cached_program) } @@ -83,7 +96,30 @@ pub fn compile_contract( noirc_driver::compile_contract(&mut context, crate_id, compile_options) } -pub(crate) fn report_errors( +/// Constructs a single `CompilationResult` for a collection of `CompilationResult`s, merging the set of warnings/errors. 
+pub fn collect_errors<T>(results: Vec<CompilationResult<T>>) -> CompilationResult<Vec<T>> {
+    let mut artifacts = Vec::new();
+    let mut warnings = Vec::new();
+    let mut errors = Vec::new();
+
+    for result in results {
+        match result {
+            Ok((new_artifact, new_warnings)) => {
+                artifacts.push(new_artifact);
+                warnings.extend(new_warnings);
+            }
+            Err(new_errors) => errors.extend(new_errors),
+        }
+    }
+
+    if errors.is_empty() {
+        Ok((artifacts, warnings))
+    } else {
+        Err(errors)
+    }
+}
+
+pub fn report_errors<T>(
     result: CompilationResult<T>,
     file_manager: &FileManager,
     deny_warnings: bool,
diff --git a/tooling/nargo/src/ops/execute.rs b/tooling/nargo/src/ops/execute.rs
index 4fc7f7b599f..370393fea09 100644
--- a/tooling/nargo/src/ops/execute.rs
+++ b/tooling/nargo/src/ops/execute.rs
@@ -1,3 +1,4 @@
+use acvm::brillig_vm::brillig::ForeignCallResult;
 use acvm::pwg::{ACVMStatus, ErrorLocation, OpcodeResolutionError, ACVM};
 use acvm::BlackBoxFunctionSolver;
 use acvm::{acir::circuit::Circuit, acir::native_types::WitnessMap};
@@ -5,7 +6,7 @@ use acvm::{acir::circuit::Circuit, acir::native_types::WitnessMap};
 use crate::errors::ExecutionError;
 use crate::NargoError;
 
-use super::foreign_calls::ForeignCallExecutor;
+use super::foreign_calls::{ForeignCallExecutor, NargoForeignCallResult};
 
 #[tracing::instrument(level = "trace", skip_all)]
 pub fn execute_circuit(
@@ -16,6 +17,8 @@
 ) -> Result<WitnessMap, NargoError> {
     let mut acvm = ACVM::new(blackbox_solver, &circuit.opcodes, initial_witness);
+    // This message should be resolved by a nargo foreign call only when we have an unsatisfied assertion.
+    let mut assert_message: Option<String> = None;
 
     loop {
         let solver_status = acvm.solve();
 
@@ -37,7 +40,13 @@
             return Err(NargoError::ExecutionError(match call_stack {
                 Some(call_stack) => {
-                    if let Some(assert_message) = circuit.get_assert_message(
+                    // First check whether we have a runtime assertion message that should be resolved on an ACVM failure
+                    // If we do not have a runtime assertion message, we should check whether the circuit has any hardcoded
+                    // messages associated with a specific `OpcodeLocation`.
+                    // Otherwise return the provided opcode resolution error.
+ if let Some(assert_message) = assert_message { + ExecutionError::AssertionFailed(assert_message.to_owned(), call_stack) + } else if let Some(assert_message) = circuit.get_assert_message( *call_stack.last().expect("Call stacks should not be empty"), ) { ExecutionError::AssertionFailed(assert_message.to_owned(), call_stack) @@ -50,7 +59,19 @@ pub fn execute_circuit( } ACVMStatus::RequiresForeignCall(foreign_call) => { let foreign_call_result = foreign_call_executor.execute(&foreign_call)?; - acvm.resolve_pending_foreign_call(foreign_call_result); + match foreign_call_result { + NargoForeignCallResult::BrilligOutput(foreign_call_result) => { + acvm.resolve_pending_foreign_call(foreign_call_result); + } + NargoForeignCallResult::ResolvedAssertMessage(message) => { + if assert_message.is_some() { + unreachable!("Resolving an assert message should happen only once as the VM should have failed"); + } + assert_message = Some(message); + + acvm.resolve_pending_foreign_call(ForeignCallResult::default()); + } + } } } } diff --git a/tooling/nargo/src/ops/foreign_calls.rs b/tooling/nargo/src/ops/foreign_calls.rs index e3a3174f8dc..f7f36c65c90 100644 --- a/tooling/nargo/src/ops/foreign_calls.rs +++ b/tooling/nargo/src/ops/foreign_calls.rs @@ -9,13 +9,69 @@ pub trait ForeignCallExecutor { fn execute( &mut self, foreign_call: &ForeignCallWaitInfo, - ) -> Result; + ) -> Result; +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum NargoForeignCallResult { + BrilligOutput(ForeignCallResult), + ResolvedAssertMessage(String), +} + +impl NargoForeignCallResult { + pub fn get_assert_message(self) -> Option { + match self { + Self::ResolvedAssertMessage(msg) => Some(msg), + _ => None, + } + } + + pub fn get_brillig_output(self) -> Option { + match self { + Self::BrilligOutput(foreign_call_result) => Some(foreign_call_result), + _ => None, + } + } +} + +impl From for NargoForeignCallResult { + fn from(value: ForeignCallResult) -> Self { + Self::BrilligOutput(value) + } +} + +impl From for NargoForeignCallResult { + fn from(value: String) -> Self { + Self::ResolvedAssertMessage(value) + } +} + +impl From for NargoForeignCallResult { + fn from(value: Value) -> Self { + let foreign_call_result: ForeignCallResult = value.into(); + foreign_call_result.into() + } +} + +impl From> for NargoForeignCallResult { + fn from(values: Vec) -> Self { + let foreign_call_result: ForeignCallResult = values.into(); + foreign_call_result.into() + } +} + +impl From> for NargoForeignCallResult { + fn from(values: Vec) -> Self { + let foreign_call_result: ForeignCallResult = values.into(); + foreign_call_result.into() + } } /// This enumeration represents the Brillig foreign calls that are natively supported by nargo. 
/// After resolution of a foreign call, nargo will restart execution of the ACVM -pub(crate) enum ForeignCall { +pub enum ForeignCall { Print, + AssertMessage, CreateMock, SetMockParams, SetMockReturns, @@ -33,6 +89,7 @@ impl ForeignCall { pub(crate) fn name(&self) -> &'static str { match self { ForeignCall::Print => "print", + ForeignCall::AssertMessage => "assert_message", ForeignCall::CreateMock => "create_mock", ForeignCall::SetMockParams => "set_mock_params", ForeignCall::SetMockReturns => "set_mock_returns", @@ -44,6 +101,7 @@ impl ForeignCall { pub(crate) fn lookup(op_name: &str) -> Option { match op_name { "print" => Some(ForeignCall::Print), + "assert_message" => Some(ForeignCall::AssertMessage), "create_mock" => Some(ForeignCall::CreateMock), "set_mock_params" => Some(ForeignCall::SetMockParams), "set_mock_returns" => Some(ForeignCall::SetMockReturns), @@ -134,29 +192,49 @@ impl DefaultForeignCallExecutor { fn execute_print(foreign_call_inputs: &[ForeignCallParam]) -> Result<(), ForeignCallError> { let skip_newline = foreign_call_inputs[0].unwrap_value().is_zero(); - let display_values: PrintableValueDisplay = foreign_call_inputs - .split_first() - .ok_or(ForeignCallError::MissingForeignCallInputs)? - .1 - .try_into()?; - print!("{display_values}{}", if skip_newline { "" } else { "\n" }); + + let foreign_call_inputs = + foreign_call_inputs.split_first().ok_or(ForeignCallError::MissingForeignCallInputs)?.1; + let display_string = Self::format_printable_value(foreign_call_inputs, skip_newline)?; + + print!("{display_string}"); + Ok(()) } + + fn execute_assert_message( + foreign_call_inputs: &[ForeignCallParam], + ) -> Result { + let display_string = Self::format_printable_value(foreign_call_inputs, true)?; + Ok(display_string.into()) + } + + fn format_printable_value( + foreign_call_inputs: &[ForeignCallParam], + skip_newline: bool, + ) -> Result { + let display_values: PrintableValueDisplay = foreign_call_inputs.try_into()?; + + let result = format!("{display_values}{}", if skip_newline { "" } else { "\n" }); + + Ok(result) + } } impl ForeignCallExecutor for DefaultForeignCallExecutor { fn execute( &mut self, foreign_call: &ForeignCallWaitInfo, - ) -> Result { + ) -> Result { let foreign_call_name = foreign_call.function.as_str(); match ForeignCall::lookup(foreign_call_name) { Some(ForeignCall::Print) => { if self.show_output { Self::execute_print(&foreign_call.inputs)?; } - Ok(ForeignCallResult { values: vec![] }) + Ok(ForeignCallResult::default().into()) } + Some(ForeignCall::AssertMessage) => Self::execute_assert_message(&foreign_call.inputs), Some(ForeignCall::CreateMock) => { let mock_oracle_name = Self::parse_string(&foreign_call.inputs[0]); assert!(ForeignCall::lookup(&mock_oracle_name).is_none()); @@ -164,7 +242,7 @@ impl ForeignCallExecutor for DefaultForeignCallExecutor { self.mocked_responses.push(MockedCall::new(id, mock_oracle_name)); self.last_mock_id += 1; - Ok(ForeignCallResult { values: vec![Value::from(id).into()] }) + Ok(Value::from(id).into()) } Some(ForeignCall::SetMockParams) => { let (id, params) = Self::extract_mock_id(&foreign_call.inputs)?; @@ -172,7 +250,7 @@ impl ForeignCallExecutor for DefaultForeignCallExecutor { .unwrap_or_else(|| panic!("Unknown mock id {}", id)) .params = Some(params.to_vec()); - Ok(ForeignCallResult { values: vec![] }) + Ok(ForeignCallResult::default().into()) } Some(ForeignCall::SetMockReturns) => { let (id, params) = Self::extract_mock_id(&foreign_call.inputs)?; @@ -180,7 +258,7 @@ impl ForeignCallExecutor for 
DefaultForeignCallExecutor { .unwrap_or_else(|| panic!("Unknown mock id {}", id)) .result = ForeignCallResult { values: params.to_vec() }; - Ok(ForeignCallResult { values: vec![] }) + Ok(ForeignCallResult::default().into()) } Some(ForeignCall::SetMockTimes) => { let (id, params) = Self::extract_mock_id(&foreign_call.inputs)?; @@ -194,12 +272,12 @@ impl ForeignCallExecutor for DefaultForeignCallExecutor { .unwrap_or_else(|| panic!("Unknown mock id {}", id)) .times_left = Some(times); - Ok(ForeignCallResult { values: vec![] }) + Ok(ForeignCallResult::default().into()) } Some(ForeignCall::ClearMock) => { let (id, _) = Self::extract_mock_id(&foreign_call.inputs)?; self.mocked_responses.retain(|response| response.id != id); - Ok(ForeignCallResult { values: vec![] }) + Ok(ForeignCallResult::default().into()) } None => { let mock_response_position = self @@ -222,7 +300,7 @@ impl ForeignCallExecutor for DefaultForeignCallExecutor { } } - Ok(ForeignCallResult { values: result }) + Ok(result.into()) } (None, Some(external_resolver)) => { let encoded_params: Vec<_> = @@ -235,7 +313,7 @@ impl ForeignCallExecutor for DefaultForeignCallExecutor { let parsed_response: ForeignCallResult = response.result()?; - Ok(parsed_response) + Ok(parsed_response.into()) } (None, None) => panic!("Unknown foreign call {}", foreign_call_name), } @@ -255,7 +333,6 @@ mod tests { use jsonrpc_core::Result as RpcResult; use jsonrpc_derive::rpc; use jsonrpc_http_server::{Server, ServerBuilder}; - use serial_test::serial; use crate::ops::{DefaultForeignCallExecutor, ForeignCallExecutor}; @@ -291,15 +368,15 @@ mod tests { let mut io = jsonrpc_core::IoHandler::new(); io.extend_with(OracleResolverImpl.to_delegate()); + // Choosing port 0 results in a random port being assigned. let server = ServerBuilder::new(io) - .start_http(&"127.0.0.1:5555".parse().expect("Invalid address")) + .start_http(&"127.0.0.1:0".parse().expect("Invalid address")) .expect("Could not start server"); let url = format!("http://{}", server.address()); (server, url) } - #[serial] #[test] fn test_oracle_resolver_echo() { let (server, url) = build_oracle_server(); @@ -312,12 +389,11 @@ mod tests { }; let result = executor.execute(&foreign_call); - assert_eq!(result.unwrap(), ForeignCallResult { values: foreign_call.inputs }); + assert_eq!(result.unwrap(), ForeignCallResult { values: foreign_call.inputs }.into()); server.close(); } - #[serial] #[test] fn test_oracle_resolver_sum() { let (server, url) = build_oracle_server(); diff --git a/tooling/nargo/src/ops/mod.rs b/tooling/nargo/src/ops/mod.rs index 4f92faa73a4..55e9e927800 100644 --- a/tooling/nargo/src/ops/mod.rs +++ b/tooling/nargo/src/ops/mod.rs @@ -1,6 +1,11 @@ -pub use self::compile::{compile_contract, compile_program, compile_workspace}; +pub use self::compile::{ + collect_errors, compile_contract, compile_program, compile_program_with_debug_instrumenter, + compile_workspace, report_errors, +}; pub use self::execute::execute_circuit; -pub use self::foreign_calls::{DefaultForeignCallExecutor, ForeignCallExecutor}; +pub use self::foreign_calls::{ + DefaultForeignCallExecutor, ForeignCall, ForeignCallExecutor, NargoForeignCallResult, +}; pub use self::optimize::{optimize_contract, optimize_program}; pub use self::transform::{transform_contract, transform_program}; diff --git a/tooling/nargo/src/ops/test.rs b/tooling/nargo/src/ops/test.rs index f38dcad0c2f..92c09ec889e 100644 --- a/tooling/nargo/src/ops/test.rs +++ b/tooling/nargo/src/ops/test.rs @@ -1,5 +1,5 @@ use acvm::{acir::native_types::WitnessMap, 
BlackBoxFunctionSolver}; -use noirc_driver::{compile_no_check, CompileOptions}; +use noirc_driver::{compile_no_check, CompileError, CompileOptions}; use noirc_errors::{debug_info::DebugInfo, FileDiagnostic}; use noirc_evaluator::errors::RuntimeError; use noirc_frontend::hir::{def_map::TestFunction, Context}; @@ -16,7 +16,7 @@ pub enum TestStatus { pub fn run_test( blackbox_solver: &B, - context: &Context, + context: &mut Context, test_function: TestFunction, show_output: bool, foreign_call_resolver_url: Option<&str>, @@ -45,14 +45,18 @@ pub fn run_test( /// that a constraint was never satisfiable. /// An example of this is the program `assert(false)` /// In that case, we check if the test function should fail, and if so, we return `TestStatus::Pass`. -fn test_status_program_compile_fail(err: RuntimeError, test_function: TestFunction) -> TestStatus { +fn test_status_program_compile_fail(err: CompileError, test_function: TestFunction) -> TestStatus { // The test has failed compilation, but it should never fail. Report error. if !test_function.should_fail() { return TestStatus::CompileError(err.into()); } // The test has failed compilation, extract the assertion message if present and check if it's expected. - let assert_message = if let RuntimeError::FailedConstraint { assert_message, .. } = &err { + let assert_message = if let CompileError::RuntimeError(RuntimeError::FailedConstraint { + assert_message, + .. + }) = &err + { assert_message.clone() } else { None diff --git a/tooling/nargo/src/ops/transform.rs b/tooling/nargo/src/ops/transform.rs index f3efd82333e..9267ed7e045 100644 --- a/tooling/nargo/src/ops/transform.rs +++ b/tooling/nargo/src/ops/transform.rs @@ -1,4 +1,4 @@ -use acvm::ExpressionWidth; +use acvm::acir::circuit::ExpressionWidth; use iter_extended::vecmap; use noirc_driver::{CompiledContract, CompiledProgram}; diff --git a/tooling/nargo_cli/Cargo.toml b/tooling/nargo_cli/Cargo.toml index 6e022f090f0..57edbf5ae04 100644 --- a/tooling/nargo_cli/Cargo.toml +++ b/tooling/nargo_cli/Cargo.toml @@ -48,6 +48,7 @@ termcolor = "1.1.2" color-eyre = "0.6.2" tokio = { version = "1.0", features = ["io-std"] } dap.workspace = true +clap-markdown = { git = "https://github.com/noir-lang/clap-markdown", rev = "450d759532c88f0dba70891ceecdbc9ff8f25d2b", optional = true } # Backends backend-interface = { path = "../backend_interface" } @@ -83,3 +84,6 @@ harness = false [[bench]] name = "iai" harness = false + +[features] +codegen-docs = ["dep:clap-markdown"] \ No newline at end of file diff --git a/tooling/nargo_cli/build.rs b/tooling/nargo_cli/build.rs index 57aa487f66a..1ca12b75dfb 100644 --- a/tooling/nargo_cli/build.rs +++ b/tooling/nargo_cli/build.rs @@ -6,8 +6,8 @@ use std::{env, fs}; fn check_rustc_version() { assert!( - version().unwrap() >= Version::parse("1.71.1").unwrap(), - "The minimal supported rustc version is 1.71.1." + version().unwrap() >= Version::parse("1.73.0").unwrap(), + "The minimal supported rustc version is 1.73.0." 
); } diff --git a/tooling/nargo_cli/src/cli/check_cmd.rs b/tooling/nargo_cli/src/cli/check_cmd.rs index a8b9dbdeeb2..242a640e484 100644 --- a/tooling/nargo_cli/src/cli/check_cmd.rs +++ b/tooling/nargo_cli/src/cli/check_cmd.rs @@ -5,8 +5,8 @@ use clap::Args; use fm::FileManager; use iter_extended::btree_map; use nargo::{ - errors::CompileError, insert_all_files_for_workspace_into_file_manager, package::Package, - parse_all, prepare_package, + errors::CompileError, insert_all_files_for_workspace_into_file_manager, ops::report_errors, + package::Package, parse_all, prepare_package, }; use nargo_toml::{get_package_manifest, resolve_workspace_from_toml, PackageSelection}; use noirc_abi::{AbiParameter, AbiType, MAIN_RETURN_NAME}; @@ -142,6 +142,19 @@ fn create_input_toml_template( toml::to_string(&map).unwrap() } +/// Run the lexing, parsing, name resolution, and type checking passes and report any warnings +/// and errors found. +pub(crate) fn check_crate_and_report_errors( + context: &mut Context, + crate_id: CrateId, + deny_warnings: bool, + disable_macros: bool, + silence_warnings: bool, +) -> Result<(), CompileError> { + let result = check_crate(context, crate_id, deny_warnings, disable_macros); + report_errors(result, &context.file_manager, deny_warnings, silence_warnings) +} + #[cfg(test)] mod tests { use noirc_abi::{AbiParameter, AbiType, AbiVisibility, Sign}; @@ -189,21 +202,3 @@ d2 = ["", "", ""] assert_eq!(toml_str, expected_toml_str); } } - -/// Run the lexing, parsing, name resolution, and type checking passes and report any warnings -/// and errors found. -pub(crate) fn check_crate_and_report_errors( - context: &mut Context, - crate_id: CrateId, - deny_warnings: bool, - disable_macros: bool, - silence_warnings: bool, -) -> Result<(), CompileError> { - let result = check_crate(context, crate_id, deny_warnings, disable_macros); - super::compile_cmd::report_errors( - result, - &context.file_manager, - deny_warnings, - silence_warnings, - ) -} diff --git a/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs b/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs index 63d27e30836..f0fe2e0ea78 100644 --- a/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs +++ b/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs @@ -1,11 +1,10 @@ use super::fs::{create_named_dir, write_to_file}; use super::NargoConfig; use crate::backends::Backend; -use crate::cli::compile_cmd::report_errors; use crate::errors::CliError; use clap::Args; -use nargo::ops::compile_program; +use nargo::ops::{compile_program, report_errors}; use nargo::{insert_all_files_for_workspace_into_file_manager, parse_all}; use nargo_toml::{get_package_manifest, resolve_workspace_from_toml, PackageSelection}; use noirc_driver::{file_manager_with_stdlib, CompileOptions, NOIR_ARTIFACT_VERSION_STRING}; diff --git a/tooling/nargo_cli/src/cli/compile_cmd.rs b/tooling/nargo_cli/src/cli/compile_cmd.rs index 34fb05249b5..4309f0db3ea 100644 --- a/tooling/nargo_cli/src/cli/compile_cmd.rs +++ b/tooling/nargo_cli/src/cli/compile_cmd.rs @@ -2,8 +2,7 @@ use std::path::Path; use fm::FileManager; use nargo::artifacts::program::ProgramArtifact; -use nargo::errors::CompileError; -use nargo::ops::{compile_contract, compile_program}; +use nargo::ops::{collect_errors, compile_contract, compile_program, report_errors}; use nargo::package::Package; use nargo::workspace::Workspace; use nargo::{insert_all_files_for_workspace_into_file_manager, parse_all}; @@ -65,11 +64,18 @@ pub(crate) fn run( .compile_options .expression_width .unwrap_or_else(|| 
backend.get_backend_info_or_default()); - let (compiled_program, compiled_contracts) = compile_workspace( + let compiled_workspace = compile_workspace( &workspace_file_manager, &parsed_files, &workspace, &args.compile_options, + ); + + let (compiled_programs, compiled_contracts) = report_errors( + compiled_workspace, + &workspace_file_manager, + args.compile_options.deny_warnings, + args.compile_options.silence_warnings, )?; let (binary_packages, contract_packages): (Vec<_>, Vec<_>) = workspace @@ -80,7 +86,7 @@ pub(crate) fn run( // Save build artifacts to disk. let only_acir = args.compile_options.only_acir; - for (package, program) in binary_packages.into_iter().zip(compiled_program) { + for (package, program) in binary_packages.into_iter().zip(compiled_programs) { let program = nargo::ops::transform_program(program, expression_width); save_program(program.clone(), &package, &workspace.target_directory_path(), only_acir); } @@ -97,7 +103,7 @@ pub(super) fn compile_workspace( parsed_files: &ParsedFiles, workspace: &Workspace, compile_options: &CompileOptions, -) -> Result<(Vec, Vec), CliError> { +) -> CompilationResult<(Vec, Vec)> { let (binary_packages, contract_packages): (Vec<_>, Vec<_>) = workspace .into_iter() .filter(|package| !package.is_library()) @@ -123,31 +129,20 @@ pub(super) fn compile_workspace( .map(|package| compile_contract(file_manager, parsed_files, package, compile_options)) .collect(); - // Report any warnings/errors which were encountered during compilation. - let compiled_programs: Vec = program_results - .into_iter() - .map(|compilation_result| { - report_errors( - compilation_result, - file_manager, - compile_options.deny_warnings, - compile_options.silence_warnings, - ) - }) - .collect::>()?; - let compiled_contracts: Vec = contract_results - .into_iter() - .map(|compilation_result| { - report_errors( - compilation_result, - file_manager, - compile_options.deny_warnings, - compile_options.silence_warnings, - ) - }) - .collect::>()?; - - Ok((compiled_programs, compiled_contracts)) + // Collate any warnings/errors which were encountered during compilation. + let compiled_programs = collect_errors(program_results); + let compiled_contracts = collect_errors(contract_results); + + match (compiled_programs, compiled_contracts) { + (Ok((programs, program_warnings)), Ok((contracts, contract_warnings))) => { + let warnings = [program_warnings, contract_warnings].concat(); + Ok(((programs, contracts), warnings)) + } + (Err(program_errors), Err(contract_errors)) => { + Err([program_errors, contract_errors].concat()) + } + (Err(errors), _) | (_, Err(errors)) => Err(errors), + } } pub(super) fn save_program( @@ -172,30 +167,3 @@ fn save_contract(contract: CompiledContract, package: &Package, circuit_dir: &Pa circuit_dir, ); } - -/// Helper function for reporting any errors in a `CompilationResult` -/// structure that is commonly used as a return result in this file. 
-pub(crate) fn report_errors( - result: CompilationResult, - file_manager: &FileManager, - deny_warnings: bool, - silence_warnings: bool, -) -> Result { - let (t, warnings) = result.map_err(|errors| { - noirc_errors::reporter::report_all( - file_manager.as_file_map(), - &errors, - deny_warnings, - silence_warnings, - ) - })?; - - noirc_errors::reporter::report_all( - file_manager.as_file_map(), - &warnings, - deny_warnings, - silence_warnings, - ); - - Ok(t) -} diff --git a/tooling/nargo_cli/src/cli/dap_cmd.rs b/tooling/nargo_cli/src/cli/dap_cmd.rs index 67322b1873e..ba4f91609ef 100644 --- a/tooling/nargo_cli/src/cli/dap_cmd.rs +++ b/tooling/nargo_cli/src/cli/dap_cmd.rs @@ -1,39 +1,54 @@ +use acvm::acir::circuit::ExpressionWidth; use acvm::acir::native_types::WitnessMap; -use acvm::ExpressionWidth; use backend_interface::Backend; use clap::Args; use nargo::constants::PROVER_INPUT_FILE; -use nargo::ops::compile_program; use nargo::workspace::Workspace; -use nargo::{insert_all_files_for_workspace_into_file_manager, parse_all}; use nargo_toml::{get_package_manifest, resolve_workspace_from_toml, PackageSelection}; use noirc_abi::input_parser::Format; -use noirc_driver::{ - file_manager_with_stdlib, CompileOptions, CompiledProgram, NOIR_ARTIFACT_VERSION_STRING, -}; +use noirc_driver::{CompileOptions, CompiledProgram, NOIR_ARTIFACT_VERSION_STRING}; use noirc_frontend::graph::CrateName; use std::io::{BufReader, BufWriter, Read, Write}; use std::path::Path; -use dap::errors::ServerError; use dap::requests::Command; use dap::responses::ResponseBody; use dap::server::Server; use dap::types::Capabilities; use serde_json::Value; -use super::compile_cmd::report_errors; +use super::debug_cmd::compile_bin_package_for_debugging; use super::fs::inputs::read_inputs_from_file; use crate::errors::CliError; use super::NargoConfig; +use noir_debugger::errors::{DapError, LoadError}; + #[derive(Debug, Clone, Args)] pub(crate) struct DapCommand { /// Override the expression width requested by the backend. 
#[arg(long, value_parser = parse_expression_width)] expression_width: Option, + + #[clap(long)] + preflight_check: bool, + + #[clap(long)] + preflight_project_folder: Option, + + #[clap(long)] + preflight_package: Option, + + #[clap(long)] + preflight_prover_name: Option, + + #[clap(long)] + preflight_generate_acir: bool, + + #[clap(long)] + preflight_skip_instrumentation: bool, } fn parse_expression_width(input: &str) -> Result { @@ -43,11 +58,12 @@ fn parse_expression_width(input: &str) -> Result() .map_err(|err| Error::new(ErrorKind::InvalidInput, err.to_string()))?; - Ok(ExpressionWidth::from(width)) + match width { + 0 => Ok(ExpressionWidth::Unbounded), + _ => Ok(ExpressionWidth::Bounded { width }), + } } -struct LoadError(&'static str); - fn find_workspace(project_folder: &str, package: Option<&str>) -> Option { let Ok(toml_path) = get_package_manifest(Path::new(project_folder)) else { eprintln!("ERROR: Failed to get package manifest"); @@ -68,45 +84,51 @@ fn find_workspace(project_folder: &str, package: Option<&str>) -> Option) -> String { + match package { + Some(pkg) => format!( + r#"Noir Debugger could not load program from {}, package {}"#, + project_folder, pkg + ), + None => format!(r#"Noir Debugger could not load program from {}"#, project_folder), + } +} + fn load_and_compile_project( project_folder: &str, package: Option<&str>, prover_name: &str, expression_width: ExpressionWidth, + acir_mode: bool, + skip_instrumentation: bool, ) -> Result<(CompiledProgram, WitnessMap), LoadError> { - let workspace = - find_workspace(project_folder, package).ok_or(LoadError("Cannot open workspace"))?; - + let workspace = find_workspace(project_folder, package) + .ok_or(LoadError::Generic(workspace_not_found_error_msg(project_folder, package)))?; let package = workspace .into_iter() .find(|p| p.is_binary()) - .ok_or(LoadError("No matching binary packages found in workspace"))?; + .ok_or(LoadError::Generic("No matching binary packages found in workspace".into()))?; - let mut workspace_file_manager = file_manager_with_stdlib(std::path::Path::new("")); - insert_all_files_for_workspace_into_file_manager(&workspace, &mut workspace_file_manager); - let parsed_files = parse_all(&workspace_file_manager); - - let compile_options = CompileOptions::default(); - let compilation_result = - compile_program(&workspace_file_manager, &parsed_files, package, &compile_options, None); - - let compiled_program = report_errors( - compilation_result, - &workspace_file_manager, - compile_options.deny_warnings, - compile_options.silence_warnings, + let compiled_program = compile_bin_package_for_debugging( + &workspace, + package, + acir_mode, + skip_instrumentation, + CompileOptions::default(), ) - .map_err(|_| LoadError("Failed to compile project"))?; + .map_err(|_| LoadError::Generic("Failed to compile project".into()))?; let compiled_program = nargo::ops::transform_program(compiled_program, expression_width); let (inputs_map, _) = read_inputs_from_file(&package.root_dir, prover_name, Format::Toml, &compiled_program.abi) - .map_err(|_| LoadError("Failed to read program inputs"))?; + .map_err(|_| { + LoadError::Generic(format!("Failed to read program inputs from {}", prover_name)) + })?; let initial_witness = compiled_program .abi .encode(&inputs_map, None) - .map_err(|_| LoadError("Failed to encode inputs"))?; + .map_err(|_| LoadError::Generic("Failed to encode inputs".into()))?; Ok((compiled_program, initial_witness)) } @@ -114,7 +136,7 @@ fn load_and_compile_project( fn loop_uninitialized_dap( mut server: 
Server, expression_width: ExpressionWidth, -) -> Result<(), ServerError> { +) -> Result<(), DapError> { loop { let req = match server.poll_request()? { Some(req) => req, @@ -137,7 +159,8 @@ fn loop_uninitialized_dap( server.respond(req.error("Missing launch arguments"))?; continue; }; - let Some(Value::String(ref project_folder)) = additional_data.get("projectFolder") else { + let Some(Value::String(ref project_folder)) = additional_data.get("projectFolder") + else { server.respond(req.error("Missing project folder argument"))?; continue; }; @@ -149,6 +172,13 @@ fn loop_uninitialized_dap( .and_then(|v| v.as_str()) .unwrap_or(PROVER_INPUT_FILE); + let generate_acir = + additional_data.get("generateAcir").and_then(|v| v.as_bool()).unwrap_or(false); + let skip_instrumentation = additional_data + .get("skipInstrumentation") + .and_then(|v| v.as_bool()) + .unwrap_or(generate_acir); + eprintln!("Project folder: {}", project_folder); eprintln!("Package: {}", package.unwrap_or("(default)")); eprintln!("Prover name: {}", prover_name); @@ -158,6 +188,8 @@ fn loop_uninitialized_dap( package, prover_name, expression_width, + generate_acir, + skip_instrumentation, ) { Ok((compiled_program, initial_witness)) => { server.respond(req.ack()?)?; @@ -172,8 +204,8 @@ fn loop_uninitialized_dap( )?; break; } - Err(LoadError(message)) => { - server.respond(req.error(message))?; + Err(LoadError::Generic(message)) => { + server.respond(req.error(message.as_str()))?; } } } @@ -192,17 +224,58 @@ fn loop_uninitialized_dap( Ok(()) } +fn run_preflight_check( + expression_width: ExpressionWidth, + args: DapCommand, +) -> Result<(), DapError> { + let project_folder = if let Some(project_folder) = args.preflight_project_folder { + project_folder + } else { + return Err(DapError::PreFlightGenericError("Noir Debugger could not initialize because the IDE (for example, VS Code) did not specify a project folder to debug.".into())); + }; + + let package = args.preflight_package.as_deref(); + let prover_name = args.preflight_prover_name.as_deref().unwrap_or(PROVER_INPUT_FILE); + + let _ = load_and_compile_project( + project_folder.as_str(), + package, + prover_name, + expression_width, + args.preflight_generate_acir, + args.preflight_skip_instrumentation, + )?; + + Ok(()) +} + pub(crate) fn run( backend: &Backend, args: DapCommand, _config: NargoConfig, ) -> Result<(), CliError> { + let expression_width = + args.expression_width.unwrap_or_else(|| backend.get_backend_info_or_default()); + + // When the --preflight-check flag is present, we run Noir's DAP server in "pre-flight mode", which test runs + // the DAP initialization code without actually starting the DAP server. + // + // This lets the client IDE present any initialization issues (compiler version mismatches, missing prover files, etc) + // in its own interface. + // + // This was necessary due to the VS Code project being reluctant to let extension authors capture + // stderr output generated by a DAP server wrapped in DebugAdapterExecutable. + // + // Exposing this preflight mode lets us gracefully handle errors that happen *before* + // the DAP loop is established, which otherwise are considered "out of band" by the maintainers of the DAP spec. 
+ // More details here: https://github.com/microsoft/vscode/issues/108138 + if args.preflight_check { + return run_preflight_check(expression_width, args).map_err(CliError::DapError); + } + let output = BufWriter::new(std::io::stdout()); let input = BufReader::new(std::io::stdin()); let server = Server::new(input, output); - let expression_width = - args.expression_width.unwrap_or_else(|| backend.get_backend_info_or_default()); - loop_uninitialized_dap(server, expression_width).map_err(CliError::DapError) } diff --git a/tooling/nargo_cli/src/cli/debug_cmd.rs b/tooling/nargo_cli/src/cli/debug_cmd.rs index a0bac3bdac1..2c4937b6f16 100644 --- a/tooling/nargo_cli/src/cli/debug_cmd.rs +++ b/tooling/nargo_cli/src/cli/debug_cmd.rs @@ -4,10 +4,13 @@ use acvm::acir::native_types::WitnessMap; use bn254_blackbox_solver::Bn254BlackBoxSolver; use clap::Args; +use fm::FileManager; use nargo::artifacts::debug::DebugArtifact; use nargo::constants::PROVER_INPUT_FILE; -use nargo::ops::compile_program; +use nargo::errors::CompileError; +use nargo::ops::{compile_program, compile_program_with_debug_instrumenter, report_errors}; use nargo::package::Package; +use nargo::workspace::Workspace; use nargo::{insert_all_files_for_workspace_into_file_manager, parse_all}; use nargo_toml::{get_package_manifest, resolve_workspace_from_toml, PackageSelection}; use noirc_abi::input_parser::{Format, InputValue}; @@ -15,9 +18,10 @@ use noirc_abi::InputMap; use noirc_driver::{ file_manager_with_stdlib, CompileOptions, CompiledProgram, NOIR_ARTIFACT_VERSION_STRING, }; +use noirc_frontend::debug::DebugInstrumenter; use noirc_frontend::graph::CrateName; +use noirc_frontend::hir::ParsedFiles; -use super::compile_cmd::report_errors; use super::fs::{inputs::read_inputs_from_file, witness::save_witness_to_dir}; use super::NargoConfig; use crate::backends::Backend; @@ -39,6 +43,14 @@ pub(crate) struct DebugCommand { #[clap(flatten)] compile_options: CompileOptions, + + /// Force ACIR output (disabling instrumentation) + #[clap(long)] + acir_mode: bool, + + /// Disable vars debug instrumentation (enabled by default) + #[clap(long)] + skip_instrumentation: Option, } pub(crate) fn run( @@ -46,6 +58,9 @@ pub(crate) fn run( args: DebugCommand, config: NargoConfig, ) -> Result<(), CliError> { + let acir_mode = args.acir_mode; + let skip_instrumentation = args.skip_instrumentation.unwrap_or(acir_mode); + let toml_path = get_package_manifest(&config.program_dir)?; let selection = args.package.map_or(PackageSelection::DefaultOrAll, PackageSelection::Selected); let workspace = resolve_workspace_from_toml( @@ -59,10 +74,6 @@ pub(crate) fn run( .expression_width .unwrap_or_else(|| backend.get_backend_info_or_default()); - let mut workspace_file_manager = file_manager_with_stdlib(std::path::Path::new("")); - insert_all_files_for_workspace_into_file_manager(&workspace, &mut workspace_file_manager); - let parsed_files = parse_all(&workspace_file_manager); - let Some(package) = workspace.into_iter().find(|p| p.is_binary()) else { println!( "No matching binary packages found in workspace. Only binary packages can be debugged." 
@@ -70,19 +81,12 @@ pub(crate) fn run( return Ok(()); }; - let compilation_result = compile_program( - &workspace_file_manager, - &parsed_files, + let compiled_program = compile_bin_package_for_debugging( + &workspace, package, - &args.compile_options, - None, - ); - - let compiled_program = report_errors( - compilation_result, - &workspace_file_manager, - args.compile_options.deny_warnings, - args.compile_options.silence_warnings, + acir_mode, + skip_instrumentation, + args.compile_options.clone(), )?; let compiled_program = nargo::ops::transform_program(compiled_program, expression_width); @@ -90,6 +94,76 @@ pub(crate) fn run( run_async(package, compiled_program, &args.prover_name, &args.witness_name, target_dir) } +pub(crate) fn compile_bin_package_for_debugging( + workspace: &Workspace, + package: &Package, + acir_mode: bool, + skip_instrumentation: bool, + compile_options: CompileOptions, +) -> Result { + let mut workspace_file_manager = file_manager_with_stdlib(std::path::Path::new("")); + insert_all_files_for_workspace_into_file_manager(workspace, &mut workspace_file_manager); + let mut parsed_files = parse_all(&workspace_file_manager); + + let compile_options = CompileOptions { + instrument_debug: !skip_instrumentation, + force_brillig: !acir_mode, + ..compile_options + }; + + let compilation_result = if !skip_instrumentation { + let debug_state = + instrument_package_files(&mut parsed_files, &workspace_file_manager, package); + + compile_program_with_debug_instrumenter( + &workspace_file_manager, + &parsed_files, + package, + &compile_options, + None, + debug_state, + ) + } else { + compile_program(&workspace_file_manager, &parsed_files, package, &compile_options, None) + }; + + report_errors( + compilation_result, + &workspace_file_manager, + compile_options.deny_warnings, + compile_options.silence_warnings, + ) +} + +/// Add debugging instrumentation to all parsed files belonging to the package +/// being compiled +fn instrument_package_files( + parsed_files: &mut ParsedFiles, + file_manager: &FileManager, + package: &Package, +) -> DebugInstrumenter { + // Start off at the entry path and read all files in the parent directory. 
+ let entry_path_parent = package + .entry_path + .parent() + .unwrap_or_else(|| panic!("The entry path is expected to be a single file within a directory and so should have a parent {:?}", package.entry_path)); + + let mut debug_instrumenter = DebugInstrumenter::default(); + + for (file_id, parsed_file) in parsed_files.iter_mut() { + let file_path = + file_manager.path(*file_id).expect("Parsed file ID not found in file manager"); + for ancestor in file_path.ancestors() { + if ancestor == entry_path_parent { + // file is in package + debug_instrumenter.instrument_module(&mut parsed_file.0); + } + } + } + + debug_instrumenter +} + fn run_async( package: &Package, program: CompiledProgram, diff --git a/tooling/nargo_cli/src/cli/execute_cmd.rs b/tooling/nargo_cli/src/cli/execute_cmd.rs index a3fcebab94f..85c0a4160a7 100644 --- a/tooling/nargo_cli/src/cli/execute_cmd.rs +++ b/tooling/nargo_cli/src/cli/execute_cmd.rs @@ -5,7 +5,7 @@ use clap::Args; use nargo::artifacts::debug::DebugArtifact; use nargo::constants::PROVER_INPUT_FILE; use nargo::errors::try_to_diagnose_runtime_error; -use nargo::ops::{compile_program, DefaultForeignCallExecutor}; +use nargo::ops::{compile_program, report_errors, DefaultForeignCallExecutor}; use nargo::package::Package; use nargo::{insert_all_files_for_workspace_into_file_manager, parse_all}; use nargo_toml::{get_package_manifest, resolve_workspace_from_toml, PackageSelection}; @@ -19,7 +19,6 @@ use noirc_frontend::graph::CrateName; use super::fs::{inputs::read_inputs_from_file, witness::save_witness_to_dir}; use super::NargoConfig; use crate::backends::Backend; -use crate::cli::compile_cmd::report_errors; use crate::errors::CliError; /// Executes a circuit to calculate its return value diff --git a/tooling/nargo_cli/src/cli/export_cmd.rs b/tooling/nargo_cli/src/cli/export_cmd.rs index feaa55857e5..044c2cb4ebb 100644 --- a/tooling/nargo_cli/src/cli/export_cmd.rs +++ b/tooling/nargo_cli/src/cli/export_cmd.rs @@ -1,4 +1,5 @@ use nargo::errors::CompileError; +use nargo::ops::report_errors; use noirc_errors::FileDiagnostic; use noirc_frontend::hir::ParsedFiles; use rayon::prelude::*; @@ -24,7 +25,6 @@ use crate::errors::CliError; use super::check_cmd::check_crate_and_report_errors; -use super::compile_cmd::report_errors; use super::fs::program::save_program_to_file; use super::NargoConfig; @@ -102,7 +102,7 @@ fn compile_exported_functions( exported_functions, |(function_name, function_id)| -> Result<(String, CompiledProgram), CompileError> { // TODO: We should to refactor how to deal with compilation errors to avoid this. 
- let program = compile_no_check(&context, compile_options, function_id, None, false) + let program = compile_no_check(&mut context, compile_options, function_id, None, false) .map_err(|error| vec![FileDiagnostic::from(error)]); let program = report_errors( diff --git a/tooling/nargo_cli/src/cli/fmt_cmd.rs b/tooling/nargo_cli/src/cli/fmt_cmd.rs index 0bd25a3a0ce..2e0ca5632f1 100644 --- a/tooling/nargo_cli/src/cli/fmt_cmd.rs +++ b/tooling/nargo_cli/src/cli/fmt_cmd.rs @@ -1,7 +1,7 @@ use std::{fs::DirEntry, path::Path}; use clap::Args; -use nargo::insert_all_files_for_workspace_into_file_manager; +use nargo::{insert_all_files_for_workspace_into_file_manager, ops::report_errors}; use nargo_toml::{get_package_manifest, resolve_workspace_from_toml, PackageSelection}; use noirc_driver::{file_manager_with_stdlib, NOIR_ARTIFACT_VERSION_STRING}; use noirc_errors::CustomDiagnostic; @@ -53,7 +53,7 @@ pub(crate) fn run(args: FormatCommand, config: NargoConfig) -> Result<(), CliErr }) .collect(); - let _ = super::compile_cmd::report_errors::<()>( + let _ = report_errors::<()>( Err(errors), &workspace_file_manager, false, diff --git a/tooling/nargo_cli/src/cli/info_cmd.rs b/tooling/nargo_cli/src/cli/info_cmd.rs index 131fd6ad214..300e1a35be2 100644 --- a/tooling/nargo_cli/src/cli/info_cmd.rs +++ b/tooling/nargo_cli/src/cli/info_cmd.rs @@ -1,12 +1,12 @@ use std::collections::HashMap; -use acvm::ExpressionWidth; +use acvm::acir::circuit::ExpressionWidth; use backend_interface::BackendError; use clap::Args; use iter_extended::vecmap; use nargo::{ artifacts::debug::DebugArtifact, insert_all_files_for_workspace_into_file_manager, - package::Package, parse_all, + ops::report_errors, package::Package, parse_all, }; use nargo_toml::{get_package_manifest, resolve_workspace_from_toml, PackageSelection}; use noirc_driver::{ @@ -73,11 +73,18 @@ pub(crate) fn run( .compile_options .expression_width .unwrap_or_else(|| backend.get_backend_info_or_default()); - let (compiled_programs, compiled_contracts) = compile_workspace( + let compiled_workspace = compile_workspace( &workspace_file_manager, &parsed_files, &workspace, &args.compile_options, + ); + + let (compiled_programs, compiled_contracts) = report_errors( + compiled_workspace, + &workspace_file_manager, + args.compile_options.deny_warnings, + args.compile_options.silence_warnings, )?; let compiled_programs = vecmap(compiled_programs, |program| { diff --git a/tooling/nargo_cli/src/cli/mod.rs b/tooling/nargo_cli/src/cli/mod.rs index 01adbe9da98..e8e17893815 100644 --- a/tooling/nargo_cli/src/cli/mod.rs +++ b/tooling/nargo_cli/src/cli/mod.rs @@ -82,6 +82,7 @@ enum NargoCommand { Dap(dap_cmd::DapCommand), } +#[cfg(not(feature = "codegen-docs"))] pub(crate) fn start_cli() -> eyre::Result<()> { let NargoCli { command, mut config } = NargoCli::parse(); @@ -126,3 +127,10 @@ pub(crate) fn start_cli() -> eyre::Result<()> { Ok(()) } + +#[cfg(feature = "codegen-docs")] +pub(crate) fn start_cli() -> eyre::Result<()> { + let markdown: String = clap_markdown::help_markdown::(); + println!("{markdown}"); + Ok(()) +} diff --git a/tooling/nargo_cli/src/cli/prove_cmd.rs b/tooling/nargo_cli/src/cli/prove_cmd.rs index 1d20e97af85..f0a9b3185b9 100644 --- a/tooling/nargo_cli/src/cli/prove_cmd.rs +++ b/tooling/nargo_cli/src/cli/prove_cmd.rs @@ -1,6 +1,6 @@ use clap::Args; use nargo::constants::{PROVER_INPUT_FILE, VERIFIER_INPUT_FILE}; -use nargo::ops::compile_program; +use nargo::ops::{compile_program, report_errors}; use nargo::package::Package; use nargo::workspace::Workspace; use 
nargo::{insert_all_files_for_workspace_into_file_manager, parse_all}; @@ -11,7 +11,6 @@ use noirc_driver::{ }; use noirc_frontend::graph::CrateName; -use super::compile_cmd::report_errors; use super::fs::{ inputs::{read_inputs_from_file, write_inputs_to_file}, proof::save_proof_to_dir, @@ -138,12 +137,11 @@ pub(crate) fn prove_package( Format::Toml, )?; - let proof = backend.prove(&compiled_program.circuit, solved_witness, false)?; + let proof = backend.prove(&compiled_program.circuit, solved_witness)?; if check_proof { let public_inputs = public_abi.encode(&public_inputs, return_value)?; - let valid_proof = - backend.verify(&proof, public_inputs, &compiled_program.circuit, false)?; + let valid_proof = backend.verify(&proof, public_inputs, &compiled_program.circuit)?; if !valid_proof { return Err(CliError::InvalidProof("".into())); diff --git a/tooling/nargo_cli/src/cli/test_cmd.rs b/tooling/nargo_cli/src/cli/test_cmd.rs index 9fee27b9172..503fd5afdd4 100644 --- a/tooling/nargo_cli/src/cli/test_cmd.rs +++ b/tooling/nargo_cli/src/cli/test_cmd.rs @@ -160,7 +160,7 @@ fn run_tests( let test_status = run_test( blackbox_solver, - &context, + &mut context, test_function, show_output, foreign_call_resolver_url, diff --git a/tooling/nargo_cli/src/cli/verify_cmd.rs b/tooling/nargo_cli/src/cli/verify_cmd.rs index ea4aaa051bb..1063b50ab6c 100644 --- a/tooling/nargo_cli/src/cli/verify_cmd.rs +++ b/tooling/nargo_cli/src/cli/verify_cmd.rs @@ -1,11 +1,10 @@ -use super::compile_cmd::report_errors; use super::fs::{inputs::read_inputs_from_file, load_hex_data}; use super::NargoConfig; use crate::{backends::Backend, errors::CliError}; use clap::Args; use nargo::constants::{PROOF_EXT, VERIFIER_INPUT_FILE}; -use nargo::ops::compile_program; +use nargo::ops::{compile_program, report_errors}; use nargo::package::Package; use nargo::workspace::Workspace; use nargo::{insert_all_files_for_workspace_into_file_manager, parse_all}; @@ -102,7 +101,7 @@ fn verify_package( let proof = load_hex_data(&proof_path)?; - let valid_proof = backend.verify(&proof, public_inputs, &compiled_program.circuit, false)?; + let valid_proof = backend.verify(&proof, public_inputs, &compiled_program.circuit)?; if valid_proof { Ok(()) diff --git a/tooling/nargo_cli/src/errors.rs b/tooling/nargo_cli/src/errors.rs index 4636772231b..c2996f53420 100644 --- a/tooling/nargo_cli/src/errors.rs +++ b/tooling/nargo_cli/src/errors.rs @@ -2,6 +2,7 @@ use acvm::acir::native_types::WitnessMapError; use hex::FromHexError; use nargo::{errors::CompileError, NargoError}; use nargo_toml::ManifestError; +use noir_debugger::errors::DapError; use noirc_abi::errors::{AbiError, InputParserError}; use std::path::PathBuf; use thiserror::Error; @@ -54,7 +55,7 @@ pub(crate) enum CliError { LspError(#[from] async_lsp::Error), #[error(transparent)] - DapError(#[from] dap::errors::ServerError), + DapError(#[from] DapError), /// Error from Nargo #[error(transparent)] diff --git a/tooling/nargo_fmt/src/items.rs b/tooling/nargo_fmt/src/items.rs new file mode 100644 index 00000000000..7f998f45b59 --- /dev/null +++ b/tooling/nargo_fmt/src/items.rs @@ -0,0 +1,117 @@ +use noirc_frontend::macros_api::Span; + +use crate::{ + utils::{comment_len, find_comment_end}, + visitor::{FmtVisitor, Shape}, +}; + +#[derive(Debug)] +pub(crate) struct Item { + pub(crate) leading: String, + pub(crate) value: String, + pub(crate) trailing: String, + pub(crate) different_line: bool, +} + +impl Item { + pub(crate) fn total_width(&self) -> usize { + comment_len(&self.leading) + 
self.value.chars().count() + comment_len(&self.trailing) + } + + pub(crate) fn is_multiline(&self) -> bool { + self.leading.contains('\n') || self.trailing.contains('\n') + } +} + +pub(crate) struct Items<'me, T> { + visitor: &'me FmtVisitor<'me>, + shape: Shape, + elements: std::iter::Peekable>, + last_position: u32, + end_position: u32, +} + +impl<'me, T: HasItem> Items<'me, T> { + pub(crate) fn new( + visitor: &'me FmtVisitor<'me>, + shape: Shape, + span: Span, + elements: Vec, + ) -> Self { + Self { + visitor, + shape, + last_position: span.start() + 1, + end_position: span.end() - 1, + elements: elements.into_iter().peekable(), + } + } +} + +impl Iterator for Items<'_, T> { + type Item = Item; + + fn next(&mut self) -> Option { + let element = self.elements.next()?; + let element_span = element.span(); + + let start = self.last_position; + let end = element_span.start(); + + let is_last = self.elements.peek().is_none(); + let next_start = self.elements.peek().map_or(self.end_position, |expr| expr.start()); + + let (leading, different_line) = self.leading(start, end); + let expr = element.format(self.visitor, self.shape); + let trailing = self.trailing(element_span.end(), next_start, is_last); + + Item { leading, value: expr, trailing, different_line }.into() + } +} + +impl<'me, T> Items<'me, T> { + pub(crate) fn leading(&mut self, start: u32, end: u32) -> (String, bool) { + let mut different_line = false; + + let leading = self.visitor.slice(start..end); + let leading_trimmed = leading.trim(); + + let starts_with_block_comment = leading_trimmed.starts_with("/*"); + let ends_with_block_comment = leading_trimmed.ends_with("*/"); + let starts_with_single_line_comment = leading_trimmed.starts_with("//"); + + if ends_with_block_comment { + let comment_end = leading_trimmed.rfind(|c| c == '/').unwrap(); + + if leading[comment_end..].contains('\n') { + different_line = true; + } + } else if starts_with_single_line_comment || starts_with_block_comment { + different_line = true; + }; + + (leading_trimmed.to_string(), different_line) + } + + pub(crate) fn trailing(&mut self, start: u32, end: u32, is_last: bool) -> String { + let slice = self.visitor.slice(start..end); + let comment_end = find_comment_end(slice, is_last); + let trailing = slice[..comment_end].trim_matches(',').trim(); + self.last_position = start + (comment_end as u32); + trailing.to_string() + } +} + +pub(crate) trait HasItem { + fn span(&self) -> Span; + + fn format(self, visitor: &FmtVisitor, shape: Shape) -> String; + + fn start(&self) -> u32 { + self.span().start() + } + + fn end(&self) -> u32 { + self.span().end() + } +} diff --git a/tooling/nargo_fmt/src/lib.rs b/tooling/nargo_fmt/src/lib.rs index d731934c3c3..0a7903f0ce1 100644 --- a/tooling/nargo_fmt/src/lib.rs +++ b/tooling/nargo_fmt/src/lib.rs @@ -20,6 +20,7 @@ /// in both placement and content during the formatting process. 
mod config; pub mod errors; +mod items; mod rewrite; mod utils; mod visitor; diff --git a/tooling/nargo_fmt/src/rewrite.rs b/tooling/nargo_fmt/src/rewrite.rs index 6a95eba8759..61792c7a7fa 100644 --- a/tooling/nargo_fmt/src/rewrite.rs +++ b/tooling/nargo_fmt/src/rewrite.rs @@ -1,11 +1,13 @@ mod array; mod expr; +mod imports; mod infix; mod parenthesized; mod typ; pub(crate) use array::rewrite as array; pub(crate) use expr::{rewrite as expr, rewrite_sub_expr as sub_expr}; +pub(crate) use imports::UseTree; pub(crate) use infix::rewrite as infix; pub(crate) use parenthesized::rewrite as parenthesized; pub(crate) use typ::rewrite as typ; diff --git a/tooling/nargo_fmt/src/rewrite/array.rs b/tooling/nargo_fmt/src/rewrite/array.rs index fc5b240f83e..77e5e756f19 100644 --- a/tooling/nargo_fmt/src/rewrite/array.rs +++ b/tooling/nargo_fmt/src/rewrite/array.rs @@ -1,7 +1,8 @@ use noirc_frontend::{hir::resolution::errors::Span, token::Token, Expression}; use crate::{ - utils::{Expr, FindToken}, + items::Item, + utils::FindToken, visitor::{expr::NewlineMode, FmtVisitor}, }; @@ -39,12 +40,12 @@ pub(crate) fn rewrite(mut visitor: FmtVisitor, array: Vec, array_spa let (leading, _) = visitor.format_comment_in_block(leading); let (trailing, _) = visitor.format_comment_in_block(trailing); - result.push(Expr { leading, value: item, trailing, different_line: false }); + result.push(Item { leading, value: item, trailing, different_line: false }); } let slice = visitor.slice(last_position..end_position); let (comment, _) = visitor.format_comment_in_block(slice); - result.push(Expr { + result.push(Item { leading: "".into(), value: "".into(), trailing: comment, diff --git a/tooling/nargo_fmt/src/rewrite/imports.rs b/tooling/nargo_fmt/src/rewrite/imports.rs new file mode 100644 index 00000000000..2788f778140 --- /dev/null +++ b/tooling/nargo_fmt/src/rewrite/imports.rs @@ -0,0 +1,115 @@ +use noirc_frontend::{PathKind, UseTreeKind}; + +use crate::{ + items::Item, + visitor::{ + expr::{format_exprs, Tactic}, + FmtVisitor, Shape, + }, +}; + +#[derive(Debug)] +pub(crate) enum UseSegment { + Ident(String, Option), + List(Vec), + Dep, + Crate, +} + +impl UseSegment { + fn rewrite(&self, visitor: &FmtVisitor, shape: Shape) -> String { + match self { + UseSegment::Ident(ident, None) => ident.clone(), + UseSegment::Ident(ident, Some(rename)) => format!("{ident} as {rename}"), + UseSegment::List(use_tree_list) => { + let mut nested_shape = shape; + nested_shape.indent.block_indent(visitor.config); + + let items: Vec<_> = use_tree_list + .iter() + .map(|item| Item { + leading: String::new(), + value: item.rewrite(visitor, shape).clone(), + trailing: String::new(), + different_line: false, + }) + .collect(); + + let list_str = + format_exprs(visitor.config, Tactic::Mixed, false, items, nested_shape, true); + + if list_str.contains('\n') { + format!( + "{{\n{}{list_str}\n{}}}", + nested_shape.indent.to_string(), + shape.indent.to_string() + ) + } else { + format!("{{{list_str}}}") + } + } + UseSegment::Dep => "dep".into(), + UseSegment::Crate => "crate".into(), + } + } +} + +#[derive(Debug)] +pub(crate) struct UseTree { + path: Vec, +} + +impl UseTree { + pub(crate) fn from_ast(use_tree: noirc_frontend::UseTree) -> Self { + let mut result = UseTree { path: vec![] }; + + match use_tree.prefix.kind { + PathKind::Crate => result.path.push(UseSegment::Crate), + PathKind::Dep => result.path.push(UseSegment::Dep), + PathKind::Plain => {} + }; + + result.path.extend( + use_tree + .prefix + .segments + .into_iter() + .map(|segment| 
UseSegment::Ident(segment.to_string(), None)), + ); + + match use_tree.kind { + UseTreeKind::Path(name, alias) => { + result.path.push(UseSegment::Ident( + name.to_string(), + alias.map(|rename| rename.to_string()), + )); + } + UseTreeKind::List(list) => { + let segment = UseSegment::List(list.into_iter().map(UseTree::from_ast).collect()); + result.path.push(segment); + } + } + + result + } + + pub(crate) fn rewrite_top_level(&self, visitor: &FmtVisitor, shape: Shape) -> String { + format!("use {};", self.rewrite(visitor, shape)) + } + + fn rewrite(&self, visitor: &FmtVisitor, shape: Shape) -> String { + let mut result = String::new(); + + let mut iter = self.path.iter().peekable(); + while let Some(segment) = iter.next() { + let segment_str = segment.rewrite(visitor, shape); + result.push_str(&segment_str); + + if iter.peek().is_some() { + result.push_str("::"); + } + } + + result + } +} diff --git a/tooling/nargo_fmt/src/rewrite/infix.rs b/tooling/nargo_fmt/src/rewrite/infix.rs index 15f5fe23aae..5d2b387496a 100644 --- a/tooling/nargo_fmt/src/rewrite/infix.rs +++ b/tooling/nargo_fmt/src/rewrite/infix.rs @@ -96,7 +96,9 @@ pub(crate) fn flatten( result.push(rewrite); - let Some(pop) = stack.pop() else { break; }; + let Some(pop) = stack.pop() else { + break; + }; match &pop.kind { ExpressionKind::Infix(infix) => { diff --git a/tooling/nargo_fmt/src/rewrite/typ.rs b/tooling/nargo_fmt/src/rewrite/typ.rs index 4c6411e92b8..aaa77b0bea5 100644 --- a/tooling/nargo_fmt/src/rewrite/typ.rs +++ b/tooling/nargo_fmt/src/rewrite/typ.rs @@ -59,7 +59,7 @@ pub(crate) fn rewrite(visitor: &FmtVisitor, _shape: Shape, typ: UnresolvedType) UnresolvedTypeData::FieldElement | UnresolvedTypeData::Integer(_, _) | UnresolvedTypeData::Bool - | UnresolvedTypeData::Named(_, _) + | UnresolvedTypeData::Named(_, _, _) | UnresolvedTypeData::Unit | UnresolvedTypeData::Expression(_) | UnresolvedTypeData::String(_) diff --git a/tooling/nargo_fmt/src/utils.rs b/tooling/nargo_fmt/src/utils.rs index 1160f01972f..94969d45e81 100644 --- a/tooling/nargo_fmt/src/utils.rs +++ b/tooling/nargo_fmt/src/utils.rs @@ -1,3 +1,6 @@ +use std::borrow::Cow; + +use crate::items::HasItem; use crate::rewrite; use crate::visitor::{FmtVisitor, Shape}; use noirc_frontend::hir::resolution::errors::Span; @@ -21,103 +24,6 @@ pub(crate) fn comments(source: &str) -> impl Iterator + '_ { }) } -#[derive(Debug)] -pub(crate) struct Expr { - pub(crate) leading: String, - pub(crate) value: String, - pub(crate) trailing: String, - pub(crate) different_line: bool, -} - -impl Expr { - pub(crate) fn total_width(&self) -> usize { - comment_len(&self.leading) + self.value.chars().count() + comment_len(&self.trailing) - } - - pub(crate) fn is_multiline(&self) -> bool { - self.leading.contains('\n') || self.trailing.contains('\n') - } -} - -pub(crate) struct Exprs<'me, T> { - pub(crate) visitor: &'me FmtVisitor<'me>, - shape: Shape, - pub(crate) elements: std::iter::Peekable>, - pub(crate) last_position: u32, - pub(crate) end_position: u32, -} - -impl<'me, T: Item> Exprs<'me, T> { - pub(crate) fn new( - visitor: &'me FmtVisitor<'me>, - shape: Shape, - span: Span, - elements: Vec, - ) -> Self { - Self { - visitor, - shape, - last_position: span.start() + 1, /*(*/ - end_position: span.end() - 1, /*)*/ - elements: elements.into_iter().peekable(), - } - } -} - -impl Iterator for Exprs<'_, T> { - type Item = Expr; - - fn next(&mut self) -> Option { - let element = self.elements.next()?; - let element_span = element.span(); - - let start = self.last_position; - let end = 
element_span.start(); - - let is_last = self.elements.peek().is_none(); - let next_start = self.elements.peek().map_or(self.end_position, |expr| expr.start()); - - let (leading, different_line) = self.leading(start, end); - let expr = element.format(self.visitor, self.shape); - let trailing = self.trailing(element_span.end(), next_start, is_last); - - Expr { leading, value: expr, trailing, different_line }.into() - } -} - -impl<'me, T> Exprs<'me, T> { - pub(crate) fn leading(&mut self, start: u32, end: u32) -> (String, bool) { - let mut different_line = false; - - let leading = self.visitor.slice(start..end); - let leading_trimmed = leading.trim(); - - let starts_with_block_comment = leading_trimmed.starts_with("/*"); - let ends_with_block_comment = leading_trimmed.ends_with("*/"); - let starts_with_single_line_comment = leading_trimmed.starts_with("//"); - - if ends_with_block_comment { - let comment_end = leading_trimmed.rfind(|c| c == '/').unwrap(); - - if leading[comment_end..].contains('\n') { - different_line = true; - } - } else if starts_with_single_line_comment || starts_with_block_comment { - different_line = true; - }; - - (leading_trimmed.to_string(), different_line) - } - - pub(crate) fn trailing(&mut self, start: u32, end: u32, is_last: bool) -> String { - let slice = self.visitor.slice(start..end); - let comment_end = find_comment_end(slice, is_last); - let trailing = slice[..comment_end].trim_matches(',').trim(); - self.last_position = start + (comment_end as u32); - trailing.to_string() - } -} - pub(crate) trait FindToken { fn find_token(&self, token: Token) -> Option; fn find_token_with(&self, f: impl Fn(&Token) -> bool) -> Option; @@ -183,7 +89,7 @@ pub(crate) fn find_comment_end(slice: &str, is_last: bool) -> usize { } } -fn comment_len(comment: &str) -> usize { +pub(crate) fn comment_len(comment: &str) -> usize { match comment { "" => 0, _ => { @@ -201,21 +107,7 @@ pub(crate) fn count_newlines(slice: &str) -> usize { bytecount::count(slice.as_bytes(), b'\n') } -pub(crate) trait Item { - fn span(&self) -> Span; - - fn format(self, visitor: &FmtVisitor, shape: Shape) -> String; - - fn start(&self) -> u32 { - self.span().start() - } - - fn end(&self) -> u32 { - self.span().end() - } -} - -impl Item for Expression { +impl HasItem for Expression { fn span(&self) -> Span { self.span } @@ -225,7 +117,7 @@ impl Item for Expression { } } -impl Item for (Ident, Expression) { +impl HasItem for (Ident, Expression) { fn span(&self) -> Span { let (name, value) = self; (name.span().start()..value.span.end()).into() @@ -245,25 +137,30 @@ impl Item for (Ident, Expression) { } } -impl Item for Param { +impl HasItem for Param { fn span(&self) -> Span { self.span } fn format(self, visitor: &FmtVisitor, shape: Shape) -> String { + let pattern = visitor.slice(self.pattern.span()); let visibility = match self.visibility { - Visibility::Public => "pub ", + Visibility::Public => "pub", Visibility::Private => "", Visibility::DataBus => "call_data", }; - let pattern = visitor.slice(self.pattern.span()); - let ty = rewrite::typ(visitor, shape, self.typ); - format!("{pattern}: {visibility}{ty}") + if self.pattern.is_synthesized() || self.typ.is_synthesized() { + pattern.to_string() + } else { + let ty = rewrite::typ(visitor, shape, self.typ); + let visibility = append_space_if_nonempty(visibility.into()); + format!("{pattern}: {visibility}{ty}") + } } } -impl Item for Ident { +impl HasItem for Ident { fn span(&self) -> Span { self.span() } @@ -289,6 +186,15 @@ pub(crate) fn 
last_line_contains_single_line_comment(s: &str) -> bool { s.lines().last().map_or(false, |line| line.contains("//")) } +pub(crate) fn append_space_if_nonempty(mut string: Cow) -> Cow { + if !string.is_empty() { + let inner = string.to_mut(); + inner.push(' '); + } + + string +} + pub(crate) fn last_line_used_width(s: &str, offset: usize) -> usize { if s.contains('\n') { last_line_width(s) diff --git a/tooling/nargo_fmt/src/visitor.rs b/tooling/nargo_fmt/src/visitor.rs index 85989db79d8..db084e5a49d 100644 --- a/tooling/nargo_fmt/src/visitor.rs +++ b/tooling/nargo_fmt/src/visitor.rs @@ -277,7 +277,7 @@ impl Indent { } } -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Default)] pub(crate) struct Shape { pub(crate) width: usize, pub(crate) indent: Indent, diff --git a/tooling/nargo_fmt/src/visitor/expr.rs b/tooling/nargo_fmt/src/visitor/expr.rs index 586d9583e32..2cd0e881e84 100644 --- a/tooling/nargo_fmt/src/visitor/expr.rs +++ b/tooling/nargo_fmt/src/visitor/expr.rs @@ -5,8 +5,9 @@ use noirc_frontend::{ use super::{ExpressionType, FmtVisitor, Shape}; use crate::{ + items::{HasItem, Item, Items}, rewrite, - utils::{self, first_line_width, Expr, FindToken, Item}, + utils::{first_line_width, FindToken}, Config, }; @@ -81,8 +82,7 @@ impl FmtVisitor<'_> { let nested_indent = visitor.shape(); let exprs: Vec<_> = - utils::Exprs::new(&visitor, nested_indent, fields_span, constructor.fields) - .collect(); + Items::new(&visitor, nested_indent, fields_span, constructor.fields).collect(); let exprs = format_exprs( visitor.config, Tactic::HorizontalVertical, @@ -189,7 +189,7 @@ impl FmtVisitor<'_> { // TODO: fixme #[allow(clippy::too_many_arguments)] -pub(crate) fn format_seq( +pub(crate) fn format_seq( shape: Shape, prefix: &str, suffix: &str, @@ -202,11 +202,10 @@ pub(crate) fn format_seq( reduce: bool, ) -> String { let mut nested_indent = shape; - let shape = shape; nested_indent.indent.block_indent(visitor.config); - let exprs: Vec<_> = utils::Exprs::new(&visitor, nested_indent, span, exprs).collect(); + let exprs: Vec<_> = Items::new(&visitor, nested_indent, span, exprs).collect(); let exprs = format_exprs(visitor.config, tactic, trailing_comma, exprs, nested_indent, reduce); wrap_exprs(prefix, suffix, exprs, nested_indent, shape, mode) @@ -249,11 +248,11 @@ pub(crate) fn format_parens( format_seq(shape, "(", ")", visitor, trailing_comma, exprs, span, tactic, mode, reduce) } -fn format_exprs( +pub(crate) fn format_exprs( config: &Config, tactic: Tactic, trailing_comma: bool, - exprs: Vec, + exprs: Vec, shape: Shape, reduce: bool, ) -> String { @@ -396,7 +395,7 @@ pub(crate) enum Tactic { impl Tactic { fn definitive( self, - exprs: &[Expr], + exprs: &[Item], short_width_threshold: usize, reduce: bool, ) -> DefinitiveTactic { @@ -449,7 +448,7 @@ enum DefinitiveTactic { } impl DefinitiveTactic { - fn reduce(self, exprs: &[Expr], short_array_element_width_threshold: usize) -> Self { + fn reduce(self, exprs: &[Item], short_array_element_width_threshold: usize) -> Self { match self { DefinitiveTactic::Vertical if no_long_exprs(exprs, short_array_element_width_threshold) => @@ -467,7 +466,7 @@ fn has_single_line_comment(slice: &str) -> bool { slice.trim_start().starts_with("//") } -fn no_long_exprs(exprs: &[Expr], max_width: usize) -> bool { +fn no_long_exprs(exprs: &[Item], max_width: usize) -> bool { exprs.iter().all(|expr| expr.value.len() <= max_width) } diff --git a/tooling/nargo_fmt/src/visitor/item.rs b/tooling/nargo_fmt/src/visitor/item.rs index eb2086168ba..28aad3c551f 100644 --- 
a/tooling/nargo_fmt/src/visitor/item.rs +++ b/tooling/nargo_fmt/src/visitor/item.rs @@ -6,8 +6,11 @@ use noirc_frontend::{ }; use crate::{ - rewrite, - utils::{last_line_contains_single_line_comment, last_line_used_width, FindToken}, + rewrite::{self, UseTree}, + utils::{ + append_space_if_nonempty, last_line_contains_single_line_comment, last_line_used_width, + FindToken, + }, visitor::expr::{format_seq, NewlineMode}, }; @@ -119,9 +122,12 @@ impl super::FmtVisitor<'_> { result.push_str("distinct "); } - if let Visibility::Public = func.def.return_visibility { - result.push_str("pub "); - } + let visibility = match func.def.return_visibility { + Visibility::Public => "pub", + Visibility::DataBus => "return_data", + Visibility::Private => "", + }; + result.push_str(&append_space_if_nonempty(visibility.into())); let typ = rewrite::typ(self, self.shape(), func.return_type()); result.push_str(&typ); @@ -146,6 +152,9 @@ impl super::FmtVisitor<'_> { for Item { kind, span } in module.items { match kind { ItemKind::Function(func) => { + self.visit_function(span, func); + } + ItemKind::Submodules(module) => { self.format_missing_indent(span.start(), true); if std::mem::take(&mut self.ignore_next_node) { @@ -154,15 +163,27 @@ impl super::FmtVisitor<'_> { continue; } - let (fn_before_block, force_brace_newline) = - self.format_fn_before_block(func.clone(), span.start()); + let name = module.name; + let after_brace = self.span_after(span, Token::LeftBrace).start(); + self.last_position = after_brace; + + let keyword = if module.is_contract { "contract" } else { "mod" }; + + self.push_str(&format!("{keyword} {name} ")); - self.push_str(&fn_before_block); - self.push_str(if force_brace_newline { "\n" } else { " " }); + if module.contents.items.is_empty() { + self.visit_empty_block((after_brace - 1..span.end()).into()); + continue; + } else { + self.push_str("{"); + self.indent.block_indent(self.config); + self.visit_module(module.contents); + } - self.visit_block(func.def.body, func.def.span); + self.close_block((self.last_position..span.end() - 1).into()); + self.last_position = span.end(); } - ItemKind::Submodules(module) => { + ItemKind::Impl(impl_) => { self.format_missing_indent(span.start(), true); if std::mem::take(&mut self.ignore_next_node) { @@ -171,31 +192,37 @@ impl super::FmtVisitor<'_> { continue; } - let name = module.name; + let slice = + self.slice(self.last_position..impl_.object_type.span.unwrap().end()); let after_brace = self.span_after(span, Token::LeftBrace).start(); self.last_position = after_brace; - let keyword = if module.is_contract { "contract" } else { "mod" }; + self.push_str(&format!("{slice} ")); - self.push_str(&format!("{keyword} {name} ")); - - if module.contents.items.is_empty() { + if impl_.methods.is_empty() { self.visit_empty_block((after_brace - 1..span.end()).into()); continue; } else { self.push_str("{"); self.indent.block_indent(self.config); - self.visit_module(module.contents); - } - self.close_block((self.last_position..span.end() - 1).into()); + for (method, span) in impl_.methods { + self.visit_function(span, method); + } + + self.close_block((self.last_position..span.end() - 1).into()); + self.last_position = span.end(); + } + } + ItemKind::Import(use_tree) => { + let use_tree = + UseTree::from_ast(use_tree).rewrite_top_level(self, self.shape()); + self.push_rewrite(use_tree, span); self.last_position = span.end(); } - ItemKind::Import(_) - | ItemKind::Struct(_) + ItemKind::Struct(_) | ItemKind::Trait(_) | ItemKind::TraitImpl(_) - | ItemKind::Impl(_) | 
ItemKind::TypeAlias(_) | ItemKind::Global(_) | ItemKind::ModuleDecl(_) => { @@ -205,4 +232,18 @@ impl super::FmtVisitor<'_> { } } } + + fn visit_function(&mut self, span: Span, func: NoirFunction) { + self.format_missing_indent(span.start(), true); + if std::mem::take(&mut self.ignore_next_node) { + self.push_str(self.slice(span)); + self.last_position = span.end(); + return; + } + let (fn_before_block, force_brace_newline) = + self.format_fn_before_block(func.clone(), span.start()); + self.push_str(&fn_before_block); + self.push_str(if force_brace_newline { "\n" } else { " " }); + self.visit_block(func.def.body, func.def.span); + } } diff --git a/tooling/nargo_fmt/src/visitor/stmt.rs b/tooling/nargo_fmt/src/visitor/stmt.rs index 800a8656ef3..44c5dad6b5d 100644 --- a/tooling/nargo_fmt/src/visitor/stmt.rs +++ b/tooling/nargo_fmt/src/visitor/stmt.rs @@ -38,11 +38,13 @@ impl super::FmtVisitor<'_> { nested_shape.indent.block_indent(self.config); - let message = - message.map_or(String::new(), |message| format!(", \"{message}\"")); + let message = message.map_or(String::new(), |message| { + let message = rewrite::sub_expr(self, nested_shape, message); + format!(", {message}") + }); let (callee, args) = match kind { - ConstrainKind::Assert => { + ConstrainKind::Assert | ConstrainKind::Constrain => { let assertion = rewrite::sub_expr(self, nested_shape, expr); let args = format!("{assertion}{message}"); @@ -60,12 +62,6 @@ impl super::FmtVisitor<'_> { unreachable!() } } - ConstrainKind::Constrain => { - let expr = rewrite::sub_expr(self, self.shape(), expr); - let constrain = format!("constrain {expr};"); - self.push_rewrite(constrain, span); - return; - } }; let args = wrap_exprs( diff --git a/tooling/nargo_fmt/tests/expected/assert.nr b/tooling/nargo_fmt/tests/expected/assert.nr new file mode 100644 index 00000000000..805e069c9a7 --- /dev/null +++ b/tooling/nargo_fmt/tests/expected/assert.nr @@ -0,0 +1,5 @@ +fn main(x: Field) { + assert(x == 0, "with a message"); + assert_eq(x, 1); + assert(x, message); +} diff --git a/tooling/nargo_fmt/tests/expected/contract.nr b/tooling/nargo_fmt/tests/expected/contract.nr index 2e3f4d7c8c4..a03b8774700 100644 --- a/tooling/nargo_fmt/tests/expected/contract.nr +++ b/tooling/nargo_fmt/tests/expected/contract.nr @@ -5,30 +5,34 @@ contract Benchmarking { use dep::aztec::protocol_types::abis::function_selector::FunctionSelector; - use dep::value_note::{ - utils::{increment, decrement}, - value_note::{VALUE_NOTE_LEN, ValueNote, ValueNoteMethods}, - }; + use dep::value_note::{utils::{increment, decrement}, value_note::{VALUE_NOTE_LEN, ValueNote, ValueNoteMethods}}; use dep::aztec::{ context::{Context}, note::{utils as note_utils, note_getter_options::NoteGetterOptions, note_header::NoteHeader}, - log::emit_unencrypted_log, - state_vars::{map::Map, public_state::PublicState, set::Set}, + log::emit_unencrypted_log, state_vars::{Map, PublicMutable, PrivateSet}, types::type_serialization::field_serialization::{FieldSerializationMethods, FIELD_SERIALIZED_LEN}, - types::address::{AztecAddress}, + types::address::{AztecAddress} }; struct Storage { - notes: Map>, - balances: Map>, + notes: Map>, + balances: Map>, } impl Storage { fn init(context: Context) -> pub Self { Storage { - notes: Map::new(context, 1, |context, slot| { Set::new(context, slot, ValueNoteMethods) }), - balances: Map::new(context, 2, |context, slot| { PublicState::new(context, slot, FieldSerializationMethods) }), + notes: Map::new( + context, + 1, + |context, slot| { PrivateSet::new(context, slot, 
ValueNoteMethods) } + ), + balances: Map::new( + context, + 2, + |context, slot| { PublicMutable::new(context, slot, FieldSerializationMethods) } + ) } } } @@ -70,16 +74,6 @@ contract Benchmarking { fn broadcast(owner: Field) { emit_unencrypted_log(&mut context, storage.balances.at(owner).read()); } - - unconstrained fn compute_note_hash_and_nullifier( - contract_address: AztecAddress, - nonce: Field, - storage_slot: Field, - preimage: [Field; VALUE_NOTE_LEN] - ) -> [Field; 4] { - let note_header = NoteHeader::new(contract_address, nonce, storage_slot); - note_utils::compute_note_hash_and_nullifier(ValueNoteMethods, note_header, preimage) - } } // Uses the token bridge contract, which tells which input token we need to talk to and handles the exit funds to L1 diff --git a/tooling/nargo_fmt/tests/expected/databus.nr b/tooling/nargo_fmt/tests/expected/databus.nr new file mode 100644 index 00000000000..60934b60b2f --- /dev/null +++ b/tooling/nargo_fmt/tests/expected/databus.nr @@ -0,0 +1,2 @@ +fn main(x: pub u8, y: call_data u8) -> return_data u32 {} + diff --git a/tooling/nargo_fmt/tests/expected/impl.nr b/tooling/nargo_fmt/tests/expected/impl.nr new file mode 100644 index 00000000000..1c0d4564b5e --- /dev/null +++ b/tooling/nargo_fmt/tests/expected/impl.nr @@ -0,0 +1,21 @@ +impl Type {} + +impl Type {} + +impl Type {} + +impl Type { + fn method(self) {} + + fn method(mut self) {} + + fn method(&mut self) {} +} + +impl Type { + fn method(self) {} +} + +impl Type { + fn method(self) {} +} diff --git a/tooling/nargo_fmt/tests/expected/struct.nr b/tooling/nargo_fmt/tests/expected/struct.nr index cf1795892d2..8fc642f7cd5 100644 --- a/tooling/nargo_fmt/tests/expected/struct.nr +++ b/tooling/nargo_fmt/tests/expected/struct.nr @@ -9,8 +9,8 @@ struct Pair { } impl Foo { - fn default(x: Field,y: Field) -> Self { - Self { bar: 0, array: [x,y] } + fn default(x: Field, y: Field) -> Self { + Self { bar: 0, array: [x, y] } } } diff --git a/tooling/nargo_fmt/tests/expected/vec.nr b/tooling/nargo_fmt/tests/expected/vec.nr index 1c9a791961e..466c9844e74 100644 --- a/tooling/nargo_fmt/tests/expected/vec.nr +++ b/tooling/nargo_fmt/tests/expected/vec.nr @@ -20,12 +20,12 @@ impl Vec { /// points beyond the end of the vector. pub fn get(self, index: Field) -> T { self.slice[index] - } + } /// Push a new element to the end of the vector, returning a /// new vector with a length one greater than the /// original unmodified vector. - pub fn push(&mut self, elem: T) { + pub fn push(&mut self, elem: T) { self.slice = self.slice.push_back(elem); } @@ -33,7 +33,7 @@ impl Vec { /// a new vector with a length of one less than the given vector, /// as well as the popped element. /// Panics if the given vector's length is zero. 
- pub fn pop(&mut self) -> T { + pub fn pop(&mut self) -> T { let (popped_slice, last_elem) = self.slice.pop_back(); self.slice = popped_slice; last_elem @@ -43,7 +43,7 @@ impl Vec { /// after it to the right pub fn insert(&mut self, index: Field, elem: T) { self.slice = self.slice.insert(index, elem); - } + } /// Remove an element at a specified index, shifting all elements /// after it to the left, returning the removed element diff --git a/tooling/nargo_fmt/tests/input/assert.nr b/tooling/nargo_fmt/tests/input/assert.nr new file mode 100644 index 00000000000..d0259da0e24 --- /dev/null +++ b/tooling/nargo_fmt/tests/input/assert.nr @@ -0,0 +1,8 @@ +fn main(x: Field) { + assert(x == 0, "with a message"); + assert_eq( + x, + 1 + ); + assert( x, message ); +} diff --git a/tooling/nargo_fmt/tests/input/contract.nr b/tooling/nargo_fmt/tests/input/contract.nr index 2e3f4d7c8c4..a03b8774700 100644 --- a/tooling/nargo_fmt/tests/input/contract.nr +++ b/tooling/nargo_fmt/tests/input/contract.nr @@ -5,30 +5,34 @@ contract Benchmarking { use dep::aztec::protocol_types::abis::function_selector::FunctionSelector; - use dep::value_note::{ - utils::{increment, decrement}, - value_note::{VALUE_NOTE_LEN, ValueNote, ValueNoteMethods}, - }; + use dep::value_note::{utils::{increment, decrement}, value_note::{VALUE_NOTE_LEN, ValueNote, ValueNoteMethods}}; use dep::aztec::{ context::{Context}, note::{utils as note_utils, note_getter_options::NoteGetterOptions, note_header::NoteHeader}, - log::emit_unencrypted_log, - state_vars::{map::Map, public_state::PublicState, set::Set}, + log::emit_unencrypted_log, state_vars::{Map, PublicMutable, PrivateSet}, types::type_serialization::field_serialization::{FieldSerializationMethods, FIELD_SERIALIZED_LEN}, - types::address::{AztecAddress}, + types::address::{AztecAddress} }; struct Storage { - notes: Map>, - balances: Map>, + notes: Map>, + balances: Map>, } impl Storage { fn init(context: Context) -> pub Self { Storage { - notes: Map::new(context, 1, |context, slot| { Set::new(context, slot, ValueNoteMethods) }), - balances: Map::new(context, 2, |context, slot| { PublicState::new(context, slot, FieldSerializationMethods) }), + notes: Map::new( + context, + 1, + |context, slot| { PrivateSet::new(context, slot, ValueNoteMethods) } + ), + balances: Map::new( + context, + 2, + |context, slot| { PublicMutable::new(context, slot, FieldSerializationMethods) } + ) } } } @@ -70,16 +74,6 @@ contract Benchmarking { fn broadcast(owner: Field) { emit_unencrypted_log(&mut context, storage.balances.at(owner).read()); } - - unconstrained fn compute_note_hash_and_nullifier( - contract_address: AztecAddress, - nonce: Field, - storage_slot: Field, - preimage: [Field; VALUE_NOTE_LEN] - ) -> [Field; 4] { - let note_header = NoteHeader::new(contract_address, nonce, storage_slot); - note_utils::compute_note_hash_and_nullifier(ValueNoteMethods, note_header, preimage) - } } // Uses the token bridge contract, which tells which input token we need to talk to and handles the exit funds to L1 diff --git a/tooling/nargo_fmt/tests/input/databus.nr b/tooling/nargo_fmt/tests/input/databus.nr new file mode 100644 index 00000000000..60934b60b2f --- /dev/null +++ b/tooling/nargo_fmt/tests/input/databus.nr @@ -0,0 +1,2 @@ +fn main(x: pub u8, y: call_data u8) -> return_data u32 {} + diff --git a/tooling/nargo_fmt/tests/input/impl.nr b/tooling/nargo_fmt/tests/input/impl.nr new file mode 100644 index 00000000000..1f111371a43 --- /dev/null +++ b/tooling/nargo_fmt/tests/input/impl.nr @@ -0,0 +1,21 @@ +impl Type 
{} + +impl Type {} + +impl Type {} + +impl Type { + fn method(self) {} + + fn method(mut self) {} + + fn method(&mut self) {} +} + +impl Type { +fn method(self) {} +} + +impl Type { + fn method(self) {} +} \ No newline at end of file diff --git a/tooling/nargo_toml/src/errors.rs b/tooling/nargo_toml/src/errors.rs index 440895056c3..77fe77bcdbb 100644 --- a/tooling/nargo_toml/src/errors.rs +++ b/tooling/nargo_toml/src/errors.rs @@ -28,7 +28,7 @@ pub enum ManifestError { #[error("Nargo.toml is badly formed, could not parse.\n\n {0}")] MalformedFile(#[from] toml::de::Error), - #[error("Unexpected workspace definition found in {0}")] + #[error("Unexpected workspace definition found in {0}. If you're attempting to load this as a dependency, you may need to add a `directory` field to your `Nargo.toml` to show which package within the workspace to use")] UnexpectedWorkspace(PathBuf), #[error("Cannot find file {entry} which was specified as the `entry` field in {toml}")] diff --git a/tooling/noir_codegen/package.json b/tooling/noir_codegen/package.json index 60ccf5ec2a5..6bb9d06f718 100644 --- a/tooling/noir_codegen/package.json +++ b/tooling/noir_codegen/package.json @@ -3,7 +3,7 @@ "contributors": [ "The Noir Team " ], - "version": "0.23.0", + "version": "0.24.0", "packageManager": "yarn@3.5.1", "license": "(MIT OR Apache-2.0)", "type": "module", @@ -52,7 +52,7 @@ "@types/node": "^20.6.2", "@types/prettier": "^3", "chai": "^4.3.8", - "eslint": "^8.50.0", + "eslint": "^8.56.0", "eslint-plugin-prettier": "^5.0.0", "mocha": "^10.2.0", "prettier": "3.0.3", diff --git a/tooling/noir_js/.gitignore b/tooling/noir_js/.gitignore index 5b57ba1708d..a55d1794141 100644 --- a/tooling/noir_js/.gitignore +++ b/tooling/noir_js/.gitignore @@ -1,3 +1 @@ crs - -!test/noir_compiled_examples/*/target diff --git a/tooling/noir_js/package.json b/tooling/noir_js/package.json index 356909a1e35..6f1899fae52 100644 --- a/tooling/noir_js/package.json +++ b/tooling/noir_js/package.json @@ -3,7 +3,7 @@ "contributors": [ "The Noir Team " ], - "version": "0.23.0", + "version": "0.24.0", "packageManager": "yarn@3.5.1", "license": "(MIT OR Apache-2.0)", "type": "module", @@ -37,7 +37,8 @@ "scripts": { "dev": "tsc-multi --watch", "build": "tsc-multi", - "test": "yarn test:node:esm && yarn test:node:cjs", + "test": "yarn test:compile_program && yarn test:node:esm && yarn test:node:cjs", + "test:compile_program": "./scripts/compile_test_programs.sh", "test:node:esm": "mocha --timeout 25000 --exit --config ./.mocharc.json", "test:node:cjs": "mocha --timeout 25000 --exit --config ./.mocharc.cjs.json", "prettier": "prettier 'src/**/*.ts'", @@ -53,7 +54,7 @@ "@types/node": "^20.6.2", "@types/prettier": "^3", "chai": "^4.3.8", - "eslint": "^8.50.0", + "eslint": "^8.56.0", "eslint-plugin-prettier": "^5.0.0", "mocha": "^10.2.0", "prettier": "3.0.3", diff --git a/tooling/noir_js/scripts/compile_test_programs.sh b/tooling/noir_js/scripts/compile_test_programs.sh new file mode 100755 index 00000000000..5257aaae696 --- /dev/null +++ b/tooling/noir_js/scripts/compile_test_programs.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +rm -rf ./test/noir_compiled_examples/**/target +nargo --program-dir ./test/noir_compiled_examples/assert_lt compile --force +nargo --program-dir ./test/noir_compiled_examples/assert_msg_runtime compile --force diff --git a/tooling/noir_js/src/program.ts b/tooling/noir_js/src/program.ts index 809943727eb..8d80ec3a247 100644 --- a/tooling/noir_js/src/program.ts +++ b/tooling/noir_js/src/program.ts @@ -67,13 +67,13 @@ export class Noir 
{ * * @example * ```typescript - * async generateFinalProof(input) + * async generateProof(input) * ``` * */ - async generateFinalProof(inputs: InputMap, foreignCallHandler?: ForeignCallHandler): Promise { + async generateProof(inputs: InputMap, foreignCallHandler?: ForeignCallHandler): Promise { const { witness } = await this.execute(inputs, foreignCallHandler); - return this.getBackend().generateFinalProof(witness); + return this.getBackend().generateProof(witness); } /** @@ -84,11 +84,11 @@ export class Noir { * * @example * ```typescript - * async verifyFinalProof(proof) + * async verifyProof(proof) * ``` * */ - async verifyFinalProof(proofData: ProofData): Promise { - return this.getBackend().verifyFinalProof(proofData); + async verifyProof(proofData: ProofData): Promise { + return this.getBackend().verifyProof(proofData); } } diff --git a/tooling/noir_js/src/witness_generation.ts b/tooling/noir_js/src/witness_generation.ts index 1f233422061..cef1d817d9b 100644 --- a/tooling/noir_js/src/witness_generation.ts +++ b/tooling/noir_js/src/witness_generation.ts @@ -26,6 +26,12 @@ const defaultForeignCallHandler: ForeignCallHandler = async (name: string, args: // // If a user needs to print values then they should provide a custom foreign call handler. return []; + } else if (name == 'assert_message') { + // By default we do not do anything for `assert_message` foreign calls due to a need for formatting, + // however we provide an empty response in order to not halt execution. + // + // If a user needs to use dynamic assertion messages then they should provide a custom foreign call handler. + return []; } throw Error(`Unexpected oracle during execution: ${name}(${args.join(', ')})`); }; diff --git a/tooling/noir_js/test/node/e2e.test.ts b/tooling/noir_js/test/node/e2e.test.ts index 33d64377b06..8921314e8ea 100644 --- a/tooling/noir_js/test/node/e2e.test.ts +++ b/tooling/noir_js/test/node/e2e.test.ts @@ -21,10 +21,10 @@ it('end-to-end proof creation and verification (outer)', async () => { // // Proof creation const prover = new Backend(assert_lt_program); - const proof = await prover.generateFinalProof(witness); + const proof = await prover.generateProof(witness); // Proof verification - const isValid = await prover.verifyFinalProof(proof); + const isValid = await prover.verifyProof(proof); expect(isValid).to.be.true; }); @@ -40,13 +40,14 @@ it('end-to-end proof creation and verification (outer) -- Program API', async () // Initialize program const program = new Noir(assert_lt_program, backend); // Generate proof - const proof = await program.generateFinalProof(inputs); + const proof = await program.generateProof(inputs); // Proof verification - const isValid = await program.verifyFinalProof(proof); + const isValid = await program.verifyProof(proof); expect(isValid).to.be.true; }); +// TODO: maybe switch to using assert_statement_recursive here to test both options it('end-to-end proof creation and verification (inner)', async () => { // Noir.Js part const inputs = { @@ -62,10 +63,10 @@ it('end-to-end proof creation and verification (inner)', async () => { // // Proof creation const prover = new Backend(assert_lt_program); - const proof = await prover.generateIntermediateProof(witness); + const proof = await prover.generateProof(witness); // Proof verification - const isValid = await prover.verifyIntermediateProof(proof); + const isValid = await prover.verifyProof(proof); expect(isValid).to.be.true; }); @@ -83,10 +84,10 @@ it('end-to-end proving and verification with different instances', 
async () => { // bb.js part const prover = new Backend(assert_lt_program); - const proof = await prover.generateFinalProof(witness); + const proof = await prover.generateProof(witness); const verifier = new Backend(assert_lt_program); - const proof_is_valid = await verifier.verifyFinalProof(proof); + const proof_is_valid = await verifier.verifyProof(proof); expect(proof_is_valid).to.be.true; }); @@ -115,14 +116,14 @@ it('[BUG] -- bb.js null function or function signature mismatch (outer-inner) ', const prover = new Backend(assert_lt_program); // Create a proof using both proving systems, the majority of the time // one would only use outer proofs. - const proofOuter = await prover.generateFinalProof(witness); - const _proofInner = await prover.generateIntermediateProof(witness); + const proofOuter = await prover.generateProof(witness); + const _proofInner = await prover.generateProof(witness); // Proof verification // - const isValidOuter = await prover.verifyFinalProof(proofOuter); + const isValidOuter = await prover.verifyProof(proofOuter); expect(isValidOuter).to.be.true; // We can also try verifying an inner proof and it will fail. - const isValidInner = await prover.verifyIntermediateProof(_proofInner); + const isValidInner = await prover.verifyProof(_proofInner); expect(isValidInner).to.be.true; }); diff --git a/tooling/noir_js/test/node/execute.test.ts b/tooling/noir_js/test/node/execute.test.ts index bfaf80882ab..491bcb0dfc4 100644 --- a/tooling/noir_js/test/node/execute.test.ts +++ b/tooling/noir_js/test/node/execute.test.ts @@ -1,9 +1,11 @@ import assert_lt_json from '../noir_compiled_examples/assert_lt/target/assert_lt.json' assert { type: 'json' }; +import assert_msg_json from '../noir_compiled_examples/assert_msg_runtime/target/assert_msg_runtime.json' assert { type: 'json' }; import { Noir } from '@noir-lang/noir_js'; import { CompiledCircuit } from '@noir-lang/types'; import { expect } from 'chai'; const assert_lt_program = assert_lt_json as CompiledCircuit; +const assert_msg_runtime = assert_msg_json as CompiledCircuit; it('returns the return value of the circuit', async () => { const inputs = { @@ -14,3 +16,16 @@ it('returns the return value of the circuit', async () => { expect(returnValue).to.be.eq('0x05'); }); + +it('circuit with a dynamic assert message should fail on an assert failure not the foreign call handler', async () => { + const inputs = { + x: '10', + y: '5', + }; + try { + await new Noir(assert_msg_runtime).execute(inputs); + } catch (error) { + const knownError = error as Error; + expect(knownError.message).to.equal('Circuit execution failed: Error: Cannot satisfy constraint'); + } +}); diff --git a/tooling/noir_js/test/noir_compiled_examples/assert_lt/src/main.nr b/tooling/noir_js/test/noir_compiled_examples/assert_lt/src/main.nr index 693e7285736..a9aaae5f2f7 100644 --- a/tooling/noir_js/test/noir_compiled_examples/assert_lt/src/main.nr +++ b/tooling/noir_js/test/noir_compiled_examples/assert_lt/src/main.nr @@ -4,6 +4,10 @@ fn main(x: u64, y: pub u64) -> pub u64 { // We include a println statement to show that noirJS will ignore this and continue execution std::println("foo"); + // A dynamic assertion message is used to show that noirJS will ignore the call and continue execution + // The assertion passes and thus the foreign call for resolving an assertion message should not be called. 
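The default foreign call handler above deliberately swallows `print` and `assert_message` oracles so execution can continue without a formatter. When those calls matter, a custom handler can be passed to `Noir.execute`. The sketch below is a minimal, hypothetical example and not part of this diff; in particular the handler's parameter and return shapes and the artifact path are assumptions.

```typescript
import { Noir } from '@noir-lang/noir_js';
import { CompiledCircuit } from '@noir-lang/types';
import assert_lt_json from './target/assert_lt.json' assert { type: 'json' };

// Surface `print` and `assert_message` oracle calls instead of silently dropping them.
// Returning an empty array keeps execution going, just like the default handler does.
const loggingHandler = async (name: string, inputs: string[][]): Promise<string[]> => {
  if (name === 'print' || name === 'assert_message') {
    console.log(`[oracle:${name}]`, JSON.stringify(inputs));
    return [];
  }
  throw new Error(`Unexpected oracle during execution: ${name}`);
};

const program = new Noir(assert_lt_json as CompiledCircuit);
// `print` fires on the std::println call; `assert_message` only fires when a
// dynamic assertion actually fails, which it does not for these inputs.
const { returnValue } = await program.execute({ x: '2', y: '3' }, loggingHandler);
console.log(returnValue); // '0x05' for these inputs, matching the existing test
```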
+ assert(x < y, f"Expected x < y but got {x} < {y}"); + assert(x < y); x + y } diff --git a/tooling/noir_js/test/noir_compiled_examples/assert_lt/target/assert_lt.json b/tooling/noir_js/test/noir_compiled_examples/assert_lt/target/assert_lt.json deleted file mode 100644 index 5b511cdc140..00000000000 --- a/tooling/noir_js/test/noir_compiled_examples/assert_lt/target/assert_lt.json +++ /dev/null @@ -1 +0,0 @@ -{"noir_version":"0.20.0+010fdb69616f47fc0a9f252a65a903316d3cbe80","hash":17538653710107541030,"backend":"acvm-backend-barretenberg","abi":{"parameters":[{"name":"x","type":{"kind":"integer","sign":"unsigned","width":64},"visibility":"private"},{"name":"y","type":{"kind":"integer","sign":"unsigned","width":64},"visibility":"public"}],"param_witnesses":{"x":[{"start":1,"end":2}],"y":[{"start":2,"end":3}]},"return_type":{"abi_type":{"kind":"integer","sign":"unsigned","width":64},"visibility":"public"},"return_witnesses":[5]},"bytecode":"H4sIAAAAAAAA/9Wa627aMBiGHcKhBBLOR+0Hl+Ak5PQP9U6gBK3SNqoqWm9kFzzMYu2DujhbPjvFEkrsJu/7vJ+TFDAtQkiH/GnG6VXLtxvQr+V9Mx/jx5pE3Db5lpZqYahI96Ba91e+bee1g60J6objS90mehbqN8nfuUbSpLAeNVAjXg++bZxeD/m+k9esjlyz6+t3A/p1MFfYvkyzharpPrXzmsFmXPU3YL8F8jUV5HvA1aRMs42qGe2YhgVqwuvH2Tvg721QLwu5Xgbw5Lq8bynz9Tye8Vb+joCjozF/R5lveJ7/riR/V8DR1Zi/q8w3TJiGLclvCzhsjfltZb5hyjQcSX5HwOFozO8o8w3XTKMnyd8TcPQ05od8RVmtilnxff0t0+hL8vcFHH2N+SFfUVarYlZ838hnGgNJ/oGAY6Ax/0CZb3R+rgwl+YcCjqHG/ENlvtH5fdVIkn8k4BhpzA/5irJ274jVrpgV3zeMmMZYkn8s4BhrzA/5irJaFbPi+3pPTGMiyT8RcEw05od8RVmtilnxfcPzXE0l+acCjqnG/FNlvmHANGaS/DMBx0xjfshXlNW+I9bRHbEOKmbF9w1jpjGX5J8LOOYa80O+oqzWHbH2KmbF9/XPnwUXkvwLAcdCY/6FMt9ozzSWkvxLAcdSY/4l8MVet3gAmV9en39kHKAOYPg+X2xlzQRjXOALOIeDtsj7hR60qpnk/eolAWNYPgbQ8k/fTK7TyEtd391SL9nFAV0HuzB2YzeIg70X+34ar+Mo2SURTdy1n7qHIPEPuVjt/7nc6wFBdDRtWFe46tgAE18D44/geLgCb4A5eQTniI4xPtBpgzF+vkMUXljQHEvTJJc/T+C6ZS8oE4+R8kmtk8vlWELwfxKg6qYqq9VErOet+v0jJ73idE3EzHXEeS1Rv5sPuM9839yaZ1quXdwntFxzMe+TBsF/7nDNj/6B8P0GuXz4848R/B3INsvS7y/ZKjuutvv96u05+7o6/kxfD9+Ob78Bhjydn08mAAA="} \ No newline at end of file diff --git a/tooling/noir_js/test/noir_compiled_examples/assert_msg_runtime/Nargo.toml b/tooling/noir_js/test/noir_compiled_examples/assert_msg_runtime/Nargo.toml new file mode 100644 index 00000000000..765f632ff74 --- /dev/null +++ b/tooling/noir_js/test/noir_compiled_examples/assert_msg_runtime/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "assert_msg_runtime" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/tooling/noir_js/test/noir_compiled_examples/assert_msg_runtime/src/main.nr b/tooling/noir_js/test/noir_compiled_examples/assert_msg_runtime/src/main.nr new file mode 100644 index 00000000000..40e447cad02 --- /dev/null +++ b/tooling/noir_js/test/noir_compiled_examples/assert_msg_runtime/src/main.nr @@ -0,0 +1,6 @@ +fn main(x: u64, y: pub u64) { + // A dynamic assertion message is used to show that noirJS will ignore the call and continue execution + // We need this assertion to fail as the `assert_message` oracle in Noir is only called + // upon a failing condition in an assert. 
+ assert(x < y, f"Expected x < y but got {x} < {y}"); +} diff --git a/tooling/noir_js_backend_barretenberg/package.json b/tooling/noir_js_backend_barretenberg/package.json index a0123883efd..28c3609fd14 100644 --- a/tooling/noir_js_backend_barretenberg/package.json +++ b/tooling/noir_js_backend_barretenberg/package.json @@ -3,7 +3,7 @@ "contributors": [ "The Noir Team " ], - "version": "0.23.0", + "version": "0.24.0", "packageManager": "yarn@3.5.1", "license": "(MIT OR Apache-2.0)", "type": "module", @@ -42,7 +42,7 @@ "lint": "NODE_NO_WARNINGS=1 eslint . --ext .ts --ignore-path ./.eslintignore --max-warnings 0" }, "dependencies": { - "@aztec/bb.js": "0.21.0", + "@aztec/bb.js": "0.24.0", "@noir-lang/types": "workspace:*", "fflate": "^0.8.0" }, @@ -50,7 +50,7 @@ "@types/node": "^20.6.2", "@types/prettier": "^3", "chai": "^4.3.8", - "eslint": "^8.50.0", + "eslint": "^8.56.0", "eslint-plugin-prettier": "^5.0.0", "mocha": "^10.2.0", "prettier": "3.0.3", diff --git a/tooling/noir_js_backend_barretenberg/src/index.ts b/tooling/noir_js_backend_barretenberg/src/index.ts index 61094a3451f..af03743eb2f 100644 --- a/tooling/noir_js_backend_barretenberg/src/index.ts +++ b/tooling/noir_js_backend_barretenberg/src/index.ts @@ -33,8 +33,18 @@ export class BarretenbergBackend implements Backend { /** @ignore */ async instantiate(): Promise { if (!this.api) { + if (typeof navigator !== 'undefined' && navigator.hardwareConcurrency) { + this.options.threads = navigator.hardwareConcurrency; + } else { + try { + const os = await import('os'); + this.options.threads = os.cpus().length; + } catch (e) { + console.log('Could not detect environment. Falling back to one thread.', e); + } + } const { Barretenberg, RawBuffer, Crs } = await import('@aztec/bb.js'); - const api = await Barretenberg.new({ threads: this.options.threads }); + const api = await Barretenberg.new(this.options); const [_exact, _total, subgroupSize] = await api.acirGetCircuitSizes(this.acirUncompressedBytecode); const crs = await Crs.new(subgroupSize + 1); @@ -47,47 +57,14 @@ export class BarretenbergBackend implements Backend { } } - /** - * Generate a final proof. This is the proof for the circuit which will verify - * intermediate proofs and or can be seen as the proof created for regular circuits. - */ - async generateFinalProof(decompressedWitness: Uint8Array): Promise { - // The settings for this proof are the same as the settings for a "normal" proof - // i.e. one that is not in the recursive setting. - const makeEasyToVerifyInCircuit = false; - return this.generateProof(decompressedWitness, makeEasyToVerifyInCircuit); - } - - /** - * Generates an intermediate proof. This is the proof that can be verified - * in another circuit. - * - * This is sometimes referred to as a recursive proof. - * We avoid this terminology as the only property of this proof - * that matters is the fact that it is easy to verify in another circuit. - * We _could_ choose to verify this proof outside of a circuit just as easily. - * - * @example - * ```typescript - * const intermediateProof = await backend.generateIntermediateProof(witness); - * ``` - */ - async generateIntermediateProof(witness: Uint8Array): Promise { - // We set `makeEasyToVerifyInCircuit` to true, which will tell the backend to - // generate the proof using components that will make the proof - // easier to verify in a circuit. 
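With the final/intermediate split removed, recursion is driven from the circuit side: the inner `main` carries the `#[recursive]` attribute and the backend only exposes `generateRecursiveProofArtifacts` for turning a proof into field-encoded artifacts. A rough usage sketch follows; the package import path, the constructor arguments, the artifact path, and the destructured field names are assumptions rather than something this diff states.

```typescript
import { BarretenbergBackend } from '@noir-lang/backend_barretenberg';
import { Noir } from '@noir-lang/noir_js';
import { CompiledCircuit } from '@noir-lang/types';
import circuit from './target/main.json' assert { type: 'json' };

// The inner circuit is assumed to opt into recursion itself via #[recursive]
// on `main`; there is no longer a separate generateIntermediateProof call.
const backend = new BarretenbergBackend(circuit as CompiledCircuit, { threads: 4 });
const program = new Noir(circuit as CompiledCircuit, backend);

const proof = await program.generateProof({ x: '1', y: '2' });

// Field-encoded artifacts for verifying this proof inside another circuit.
// The destructured names below are an assumption about the return shape.
const numPublicInputs = 1;
const { proofAsFields, vkAsFields, vkHash } = await backend.generateRecursiveProofArtifacts(
  proof,
  numPublicInputs,
);
console.log(proofAsFields.length, vkAsFields.length, vkHash);
```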
- const makeEasyToVerifyInCircuit = true; - return this.generateProof(witness, makeEasyToVerifyInCircuit); - } - - /** @ignore */ - async generateProof(compressedWitness: Uint8Array, makeEasyToVerifyInCircuit: boolean): Promise { + /** @description Generates a proof */ + async generateProof(compressedWitness: Uint8Array): Promise { await this.instantiate(); + // TODO: Change once `@aztec/bb.js` version is updated to use methods without isRecursive flag const proofWithPublicInputs = await this.api.acirCreateProof( this.acirComposer, this.acirUncompressedBytecode, gunzip(compressedWitness), - makeEasyToVerifyInCircuit, ); const splitIndex = proofWithPublicInputs.length - numBytesInProofWithoutPublicInputs; @@ -105,17 +82,17 @@ export class BarretenbergBackend implements Backend { * Instead of passing the proof and verification key as a byte array, we pass them * as fields which makes it cheaper to verify in a circuit. * - * The proof that is passed here will have been created using the `generateIntermediateProof` - * method. + * The proof that is passed here will have been created using a circuit + * that has the #[recursive] attribute on its `main` method. * * The number of public inputs denotes how many public inputs are in the inner proof. * * @example * ```typescript - * const artifacts = await backend.generateIntermediateProofArtifacts(proof, numOfPublicInputs); + * const artifacts = await backend.generateRecursiveProofArtifacts(proof, numOfPublicInputs); * ``` */ - async generateIntermediateProofArtifacts( + async generateRecursiveProofArtifacts( proofData: ProofData, numOfPublicInputs = 0, ): Promise<{ @@ -143,31 +120,13 @@ export class BarretenbergBackend implements Backend { }; } - async verifyFinalProof(proofData: ProofData): Promise { + /** @description Verifies a proof */ + async verifyProof(proofData: ProofData): Promise { const proof = reconstructProofWithPublicInputs(proofData); - const makeEasyToVerifyInCircuit = false; - const verified = await this.verifyProof(proof, makeEasyToVerifyInCircuit); - return verified; - } - - /** - * - * @example - * ```typescript - * const isValidIntermediate = await backend.verifyIntermediateProof(proof); - * ``` - */ - async verifyIntermediateProof(proofData: ProofData): Promise { - const proof = reconstructProofWithPublicInputs(proofData); - const makeEasyToVerifyInCircuit = true; - return this.verifyProof(proof, makeEasyToVerifyInCircuit); - } - - /** @ignore */ - async verifyProof(proof: Uint8Array, makeEasyToVerifyInCircuit: boolean): Promise { await this.instantiate(); await this.api.acirInitVerificationKey(this.acirComposer); - return await this.api.acirVerifyProof(this.acirComposer, proof, makeEasyToVerifyInCircuit); + // TODO: Change once `@aztec/bb.js` version is updated to use methods without isRecursive flag + return await this.api.acirVerifyProof(this.acirComposer, proof); } async destroy(): Promise { diff --git a/tooling/noir_js_backend_barretenberg/src/types.ts b/tooling/noir_js_backend_barretenberg/src/types.ts index 041e36fdf91..fac23030aad 100644 --- a/tooling/noir_js_backend_barretenberg/src/types.ts +++ b/tooling/noir_js_backend_barretenberg/src/types.ts @@ -5,4 +5,5 @@ export type BackendOptions = { /** @description Number of threads */ threads: number; + memory?: { maximum: number }; }; diff --git a/tooling/noir_js_types/package.json b/tooling/noir_js_types/package.json index ef75f3d2fb3..a3b5c85897a 100644 --- a/tooling/noir_js_types/package.json +++ b/tooling/noir_js_types/package.json @@ -4,7 +4,7 @@ "The Noir Team " ], 
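With the intermediate/final split removed, the backend exposes a single proof flow. The following is a minimal usage sketch of the renamed surface (`generateProof`, `generateRecursiveProofArtifacts`, `verifyProof`) together with the extended `BackendOptions`; the constructor arguments, the pre-existing `circuit` and `compressedWitness` values, and the `memory.maximum` figure are assumptions for illustration, not part of this diff.

```typescript
import { BarretenbergBackend } from '@noir-lang/backend_barretenberg';
import type { ProofData } from '@noir-lang/types';

// Assumed inputs: a compiled Noir program and a gzipped witness produced elsewhere.
declare const circuit: ConstructorParameters<typeof BarretenbergBackend>[0];
declare const compressedWitness: Uint8Array;

async function proveAndVerify(): Promise<boolean> {
  // BackendOptions now optionally carries a WASM memory cap alongside the thread
  // count (the values here are illustrative).
  const backend = new BarretenbergBackend(circuit, { threads: 4, memory: { maximum: 65536 } });

  // generateFinalProof / generateIntermediateProof collapse into a single generateProof.
  const proof: ProofData = await backend.generateProof(compressedWitness);

  // Recursion artifacts are now derived from an ordinary proof of a circuit whose
  // `main` carries the #[recursive] attribute.
  const artifacts = await backend.generateRecursiveProofArtifacts(proof, 1);
  console.log('recursion artifacts', artifacts);

  // verifyFinalProof / verifyIntermediateProof collapse into a single verifyProof.
  const verified = await backend.verifyProof(proof);
  await backend.destroy();
  return verified;
}
```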
"packageManager": "yarn@3.5.1", - "version": "0.23.0", + "version": "0.24.0", "license": "(MIT OR Apache-2.0)", "homepage": "https://noir-lang.org/", "repository": { @@ -38,12 +38,9 @@ "types": "./lib/esm/types.d.ts" } }, - "dependencies": { - "@noir-lang/noirc_abi": "workspace:*" - }, "devDependencies": { "@types/prettier": "^3", - "eslint": "^8.50.0", + "eslint": "^8.56.0", "eslint-plugin-prettier": "^5.0.0", "prettier": "3.0.3", "typescript": "^5.2.2" diff --git a/tooling/noir_js_types/src/types.ts b/tooling/noir_js_types/src/types.ts index ee4921bd606..3a62d79a807 100644 --- a/tooling/noir_js_types/src/types.ts +++ b/tooling/noir_js_types/src/types.ts @@ -1,21 +1,44 @@ -import { Abi } from '@noir-lang/noirc_abi'; +export type Field = string | number | boolean; +export type InputValue = Field | InputMap | (Field | InputMap)[]; +export type InputMap = { [key: string]: InputValue }; -export { Abi, WitnessMap } from '@noir-lang/noirc_abi'; +export type Visibility = 'public' | 'private' | 'databus'; +export type Sign = 'unsigned' | 'signed'; +export type AbiType = + | { kind: 'field' } + | { kind: 'boolean' } + | { kind: 'string'; length: number } + | { kind: 'integer'; sign: Sign; width: number } + | { kind: 'array'; length: number; type: AbiType } + | { kind: 'tuple'; fields: AbiType[] } + | { kind: 'struct'; path: string; fields: { name: string; type: AbiType }[] }; -export interface Backend { - /** - * @description Generates a final proof (not meant to be verified in another circuit) */ - generateFinalProof(decompressedWitness: Uint8Array): Promise; +export type AbiParameter = { + name: string; + type: AbiType; + visibility: Visibility; +}; + +// Map from witness index to hex string value of witness. +export type WitnessMap = Map; +export type Abi = { + parameters: AbiParameter[]; + param_witnesses: Record; + return_type: { abi_type: AbiType; visibility: Visibility } | null; + return_witnesses: number[]; +}; + +export interface Backend { /** - * @description Generates an intermediate proof (meant to be verified in another circuit) */ - generateIntermediateProof(decompressedWitness: Uint8Array): Promise; + * @description Generates a proof */ + generateProof(decompressedWitness: Uint8Array): Promise; /** * * @description Retrieves the artifacts from a proof in the Field format */ - generateIntermediateProofArtifacts( + generateRecursiveProofArtifacts( proofData: ProofData, numOfPublicInputs: number, ): Promise<{ @@ -28,11 +51,8 @@ export interface Backend { }>; /** - * @description Verifies a final proof */ - verifyFinalProof(proofData: ProofData): Promise; - - /** @description Verifies an intermediate proof */ - verifyIntermediateProof(proofData: ProofData): Promise; + * @description Verifies a proof */ + verifyProof(proofData: ProofData): Promise; /** * @description Destroys the backend */ diff --git a/tooling/noirc_abi/src/lib.rs b/tooling/noirc_abi/src/lib.rs index 1fc257c1676..26feab65d83 100644 --- a/tooling/noirc_abi/src/lib.rs +++ b/tooling/noirc_abi/src/lib.rs @@ -142,14 +142,13 @@ impl AbiType { Signedness::Signed => Sign::Signed, }; - Self::Integer { sign, width: *bit_width } - } - Type::TypeVariable(binding, TypeVariableKind::IntegerOrField) => { - match &*binding.borrow() { - TypeBinding::Bound(typ) => Self::from_type(context, typ), - TypeBinding::Unbound(_) => Self::from_type(context, &Type::default_int_type()), - } + Self::Integer { sign, width: (*bit_width).into() } } + Type::TypeVariable(binding, TypeVariableKind::IntegerOrField) + | Type::TypeVariable(binding, 
TypeVariableKind::Integer) => match &*binding.borrow() { + TypeBinding::Bound(typ) => Self::from_type(context, typ), + TypeBinding::Unbound(_) => Self::from_type(context, &Type::default_int_type()), + }, Type::Bool => Self::Boolean, Type::String(size) => { let size = size @@ -158,7 +157,7 @@ impl AbiType { Self::String { length: size } } - Type::Struct(def, ref args) => { + Type::Struct(def, args) => { let struct_type = def.borrow(); let fields = struct_type.get_fields(args); let fields = vecmap(fields, |(name, typ)| (name, Self::from_type(context, &typ))); @@ -167,6 +166,7 @@ impl AbiType { context.fully_qualified_struct_path(context.root_crate_id(), struct_type.id); Self::Struct { fields, path } } + Type::Alias(def, args) => Self::from_type(context, &def.borrow().get_type(args)), Type::Tuple(fields) => { let fields = vecmap(fields, |typ| Self::from_type(context, typ)); Self::Tuple { fields } diff --git a/tooling/noirc_abi_wasm/package.json b/tooling/noirc_abi_wasm/package.json index db0f6c29153..05fcc270402 100644 --- a/tooling/noirc_abi_wasm/package.json +++ b/tooling/noirc_abi_wasm/package.json @@ -3,7 +3,7 @@ "contributors": [ "The Noir Team " ], - "version": "0.23.0", + "version": "0.24.0", "license": "(MIT OR Apache-2.0)", "homepage": "https://noir-lang.org/", "repository": { @@ -37,12 +37,15 @@ "build:nix": "nix build -L .#noirc_abi_wasm", "install:from:nix": "yarn clean && yarn build:nix && cp -rL ./result/noirc_abi_wasm/nodejs ./ && cp -rL ./result/noirc_abi_wasm/web ./" }, + "dependencies": { + "@noir-lang/types": "workspace:*" + }, "devDependencies": { "@esm-bundle/chai": "^4.3.4-fix.0", "@web/dev-server-esbuild": "^0.3.6", "@web/test-runner": "^0.15.3", "@web/test-runner-playwright": "^0.10.0", - "eslint": "^8.50.0", + "eslint": "^8.56.0", "mocha": "^10.2.0" } } diff --git a/tooling/noirc_abi_wasm/src/js_witness_map.rs b/tooling/noirc_abi_wasm/src/js_witness_map.rs index fcc6e75f18c..293c5c089f8 100644 --- a/tooling/noirc_abi_wasm/src/js_witness_map.rs +++ b/tooling/noirc_abi_wasm/src/js_witness_map.rs @@ -7,12 +7,6 @@ use acvm::{ use js_sys::{JsString, Map}; use wasm_bindgen::prelude::{wasm_bindgen, JsValue}; -#[wasm_bindgen(typescript_custom_section)] -const WITNESS_MAP: &'static str = r#" -// Map from witness index to hex string value of witness. 
-export type WitnessMap = Map; -"#; - // WitnessMap #[wasm_bindgen] extern "C" { diff --git a/tooling/noirc_abi_wasm/src/lib.rs b/tooling/noirc_abi_wasm/src/lib.rs index 5557cc917bf..ce15f6d502e 100644 --- a/tooling/noirc_abi_wasm/src/lib.rs +++ b/tooling/noirc_abi_wasm/src/lib.rs @@ -26,9 +26,8 @@ use js_witness_map::JsWitnessMap; #[wasm_bindgen(typescript_custom_section)] const INPUT_MAP: &'static str = r#" -export type Field = string | number | boolean; -export type InputValue = Field | InputMap | (Field | InputMap)[]; -export type InputMap = { [key: string]: InputValue }; +import { Field, InputValue, InputMap, Visibility, Sign, AbiType, AbiParameter, Abi, WitnessMap } from "@noir-lang/types"; +export { Field, InputValue, InputMap, Visibility, Sign, AbiType, AbiParameter, Abi, WitnessMap } from "@noir-lang/types"; "#; #[wasm_bindgen] @@ -36,44 +35,11 @@ extern "C" { #[wasm_bindgen(extends = js_sys::Object, js_name = "InputMap", typescript_type = "InputMap")] #[derive(Clone, Debug, PartialEq, Eq)] pub type JsInputMap; -} -#[wasm_bindgen] -extern "C" { #[wasm_bindgen(extends = js_sys::Object, js_name = "InputValue", typescript_type = "InputValue")] #[derive(Clone, Debug, PartialEq, Eq)] pub type JsInputValue; -} -#[wasm_bindgen(typescript_custom_section)] -const ABI: &'static str = r#" -export type Visibility = "public" | "private" | "databus"; -export type Sign = "unsigned" | "signed"; -export type AbiType = - { kind: "field" } | - { kind: "boolean" } | - { kind: "string", length: number } | - { kind: "integer", sign: Sign, width: number } | - { kind: "array", length: number, type: AbiType } | - { kind: "tuple", fields: AbiType[] } | - { kind: "struct", path: string, fields: { name: string, type: AbiType }[] }; - -export type AbiParameter = { - name: string, - type: AbiType, - visibility: Visibility, -}; - -export type Abi = { - parameters: AbiParameter[], - param_witnesses: Record, - return_type: {abi_type: AbiType, visibility: Visibility} | null, - return_witnesses: number[], -} -"#; - -#[wasm_bindgen] -extern "C" { #[wasm_bindgen(extends = js_sys::Object, js_name = "Abi", typescript_type = "Abi")] #[derive(Clone, Debug, PartialEq, Eq)] pub type JsAbi; diff --git a/yarn.lock b/yarn.lock index 743068f1907..f5f3a29f08a 100644 --- a/yarn.lock +++ b/yarn.lock @@ -221,9 +221,9 @@ __metadata: languageName: node linkType: hard -"@aztec/bb.js@npm:0.16.0": - version: 0.16.0 - resolution: "@aztec/bb.js@npm:0.16.0" +"@aztec/bb.js@npm:0.24.0": + version: 0.24.0 + resolution: "@aztec/bb.js@npm:0.24.0" dependencies: comlink: ^4.4.1 commander: ^10.0.1 @@ -231,21 +231,7 @@ __metadata: tslib: ^2.4.0 bin: bb.js: dest/node/main.js - checksum: 5f68b4ad16284a3a871e0ad21fea05aed670383bc639c9d07ab3bf9b7a9d15cc8a4e5cda404a9290775ad5023924739543a8aac37d602892dd1fb5087521970b - languageName: node - linkType: hard - -"@aztec/bb.js@npm:0.21.0": - version: 0.21.0 - resolution: "@aztec/bb.js@npm:0.21.0" - dependencies: - comlink: ^4.4.1 - commander: ^10.0.1 - debug: ^4.3.4 - tslib: ^2.4.0 - bin: - bb.js: dest/node/main.js - checksum: a0fb97476f52025f3c31b7a5e890966ac375ed47c5cfd3434f5c3e4265af3c7566a162f37d6c56f394f44bfe4ba67e5002b7c5998ecc4f6abe70e04f5b8abe34 + checksum: a086dabf30084cfa526e512148b9c02f0a0770dcc19b7dca4af9a3e98612b716acc7eaac6b52c0f12d985932e866d1cb9e534ded6ac9d747f3dd021afe25de27 languageName: node linkType: hard @@ -3499,10 +3485,10 @@ __metadata: languageName: node linkType: hard -"@eslint/js@npm:8.55.0": - version: 8.55.0 - resolution: "@eslint/js@npm:8.55.0" - checksum: 
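A consequence of the re-export change in noirc_abi_wasm above is that downstream code now imports these shared types from `@noir-lang/types` rather than from `@noir-lang/noirc_abi`. A small, hedged example follows; the generic parameters of `WitnessMap` are not shown in this hunk, so the literal below simply follows the retained comment describing it as a map from witness index to hex string, and the values are made up for illustration.

```typescript
import type { WitnessMap } from '@noir-lang/types';

// Witness indices mapped to hex-encoded field element values (illustrative only).
const witness: WitnessMap = new Map([
  [1, '0x05'],
  [2, '0x0a'],
]);
```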
fa33ef619f0646ed15649b0c2e313e4d9ccee8425884bdbfc78020d6b6b64c0c42fa9d83061d0e6158e1d4274f03f0f9008786540e2efab8fcdc48082259908c +"@eslint/js@npm:8.56.0": + version: 8.56.0 + resolution: "@eslint/js@npm:8.56.0" + checksum: 5804130574ef810207bdf321c265437814e7a26f4e6fac9b496de3206afd52f533e09ec002a3be06cd9adcc9da63e727f1883938e663c4e4751c007d5b58e539 languageName: node linkType: hard @@ -3965,6 +3951,13 @@ __metadata: languageName: node linkType: hard +"@import-maps/resolve@npm:^1.0.1": + version: 1.0.1 + resolution: "@import-maps/resolve@npm:1.0.1" + checksum: 17ee033e26a0fd82294de87eae76d32b553a130fdbf0fb8c70d39f2087a3e8a4a5908970a99aa32bd175153efe9b7dfee6b7f99df36f41abed08c1911dbdb19c + languageName: node + linkType: hard + "@isaacs/cliui@npm:^8.0.2": version: 8.0.2 resolution: "@isaacs/cliui@npm:8.0.2" @@ -4064,6 +4057,16 @@ __metadata: languageName: node linkType: hard +"@jridgewell/trace-mapping@npm:^0.3.20": + version: 0.3.22 + resolution: "@jridgewell/trace-mapping@npm:0.3.22" + dependencies: + "@jridgewell/resolve-uri": ^3.1.0 + "@jridgewell/sourcemap-codec": ^1.4.14 + checksum: ac7dd2cfe0b479aa1b81776d40d789243131cc792dc8b6b6a028c70fcd6171958ae1a71bf67b618ffe3c0c3feead9870c095ee46a5e30319410d92976b28f498 + languageName: node + linkType: hard + "@leichtgewicht/ip-codec@npm:^2.0.1": version: 2.0.4 resolution: "@leichtgewicht/ip-codec@npm:2.0.4" @@ -4175,30 +4178,6 @@ __metadata: languageName: node linkType: hard -"@monaco-editor/loader@npm:^1.4.0": - version: 1.4.0 - resolution: "@monaco-editor/loader@npm:1.4.0" - dependencies: - state-local: ^1.0.6 - peerDependencies: - monaco-editor: ">= 0.21.0 < 1" - checksum: 374ec0ea872ee15b33310e105a43217148161480d3955c5cece87d0f801754cd2c45a3f6c539a75da18a066c1615756fb87eaf1003f1df6a64a0cbce5d2c3749 - languageName: node - linkType: hard - -"@monaco-editor/react@npm:4.6.0": - version: 4.6.0 - resolution: "@monaco-editor/react@npm:4.6.0" - dependencies: - "@monaco-editor/loader": ^1.4.0 - peerDependencies: - monaco-editor: ">= 0.25.0 < 1" - react: ^16.8.0 || ^17.0.0 || ^18.0.0 - react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 - checksum: 9d44e76c5baad6db5f84c90a5540fbd3c9af691b97d76cf2a99b3c8273004d0efe44c2572d80e9d975c9af10022c21e4a66923924950a5201e82017c8b20428c - languageName: node - linkType: hard - "@noble/curves@npm:1.2.0": version: 1.2.0 resolution: "@noble/curves@npm:1.2.0" @@ -4395,13 +4374,6 @@ __metadata: languageName: node linkType: hard -"@noir-lang/acvm_js@npm:0.38.0": - version: 0.38.0 - resolution: "@noir-lang/acvm_js@npm:0.38.0" - checksum: 42a5bba45135d1df0d0eb3f7b65439733e016580bad610e859e140638d42200d6b856ff11c4b30417b74ce011da7c39861aafb1c5b8c7211de2172aea449c635 - languageName: node - linkType: hard - "@noir-lang/acvm_js@workspace:*, @noir-lang/acvm_js@workspace:acvm-repo/acvm_js": version: 0.0.0-use.local resolution: "@noir-lang/acvm_js@workspace:acvm-repo/acvm_js" @@ -4411,7 +4383,7 @@ __metadata: "@web/test-runner": ^0.15.3 "@web/test-runner-playwright": ^0.10.0 chai: ^4.3.7 - eslint: ^8.50.0 + eslint: ^8.56.0 eslint-plugin-prettier: ^5.0.0 mocha: ^10.2.0 prettier: 3.0.3 @@ -4420,27 +4392,16 @@ __metadata: languageName: unknown linkType: soft -"@noir-lang/backend_barretenberg@npm:^0.22.0": - version: 0.22.0 - resolution: "@noir-lang/backend_barretenberg@npm:0.22.0" - dependencies: - "@aztec/bb.js": 0.16.0 - "@noir-lang/types": 0.22.0 - fflate: ^0.8.0 - checksum: ead456218ba61d925e0fc5b47d1b94272e980b44a220f1262fb6cdc73cff7cd4232ddc69dd67bb21e50f0b43e7696d4a96fde15e3eadc0bf223ec6d59e014e23 - languageName: node - linkType: hard 
- "@noir-lang/backend_barretenberg@workspace:*, @noir-lang/backend_barretenberg@workspace:tooling/noir_js_backend_barretenberg": version: 0.0.0-use.local resolution: "@noir-lang/backend_barretenberg@workspace:tooling/noir_js_backend_barretenberg" dependencies: - "@aztec/bb.js": 0.21.0 + "@aztec/bb.js": 0.24.0 "@noir-lang/types": "workspace:*" "@types/node": ^20.6.2 "@types/prettier": ^3 chai: ^4.3.8 - eslint: ^8.50.0 + eslint: ^8.56.0 eslint-plugin-prettier: ^5.0.0 fflate: ^0.8.0 mocha: ^10.2.0 @@ -4461,7 +4422,7 @@ __metadata: "@types/node": ^20.6.2 "@types/prettier": ^3 chai: ^4.3.8 - eslint: ^8.50.0 + eslint: ^8.56.0 eslint-plugin-prettier: ^5.0.0 glob: ^10.3.10 mocha: ^10.2.0 @@ -4475,17 +4436,6 @@ __metadata: languageName: unknown linkType: soft -"@noir-lang/noir_js@npm:^0.22.0": - version: 0.22.0 - resolution: "@noir-lang/noir_js@npm:0.22.0" - dependencies: - "@noir-lang/acvm_js": 0.38.0 - "@noir-lang/noirc_abi": 0.22.0 - "@noir-lang/types": 0.22.0 - checksum: 3b0873ad87521415af11208bebe5690191d03fa06dcd515789f0a63f7641146cdcb01d292b208452856ea3967e196c8332cb2618e013f9e7e5ce7d6e09de043d - languageName: node - linkType: hard - "@noir-lang/noir_js@workspace:*, @noir-lang/noir_js@workspace:tooling/noir_js": version: 0.0.0-use.local resolution: "@noir-lang/noir_js@workspace:tooling/noir_js" @@ -4498,7 +4448,7 @@ __metadata: "@types/node": ^20.6.2 "@types/prettier": ^3 chai: ^4.3.8 - eslint: ^8.50.0 + eslint: ^8.56.0 eslint-plugin-prettier: ^5.0.0 mocha: ^10.2.0 prettier: 3.0.3 @@ -4509,20 +4459,13 @@ __metadata: languageName: unknown linkType: soft -"@noir-lang/noir_wasm@npm:^0.22.0": - version: 0.22.0 - resolution: "@noir-lang/noir_wasm@npm:0.22.0" - checksum: 7ac0ca170bf312df761d7ccfd32a67a27f88f15ad4eed1807864295d761d3b2176ffb82f4c6931e1bc06b225d6f738519962c79ffbce9a33d5ef8a6a2bdea82c - languageName: node - linkType: hard - "@noir-lang/noir_wasm@workspace:*, @noir-lang/noir_wasm@workspace:compiler/wasm": version: 0.0.0-use.local resolution: "@noir-lang/noir_wasm@workspace:compiler/wasm" dependencies: "@esm-bundle/chai": ^4.3.4-fix.0 "@ltd/j-toml": ^1.38.0 - "@noir-lang/noirc_abi": "workspace:*" + "@noir-lang/types": "workspace:*" "@types/adm-zip": ^0.5.0 "@types/chai": ^4 "@types/mocha": ^10.0.6 @@ -4540,13 +4483,16 @@ __metadata: assert: ^2.1.0 browserify-fs: ^1.0.0 chai: ^4.3.10 - copy-webpack-plugin: ^11.0.0 - html-webpack-plugin: ^5.5.4 + copy-webpack-plugin: ^12.0.2 + eslint: ^8.56.0 + eslint-plugin-prettier: ^5.0.0 + html-webpack-plugin: ^5.6.0 memfs: ^4.6.0 mocha: ^10.2.0 mocha-each: ^2.0.1 pako: ^2.1.0 path-browserify: ^1.0.1 + prettier: 3.0.3 process: ^0.11.10 readable-stream: ^4.4.2 sinon: ^17.0.1 @@ -4555,27 +4501,22 @@ __metadata: typescript: ~5.2.2 unzipit: ^1.4.3 url: ^0.11.3 - webpack: ^5.49.0 - webpack-cli: ^4.7.2 + webpack: ^5.90.1 + webpack-cli: ^5.1.4 + webpack-dev-server: ^5.0.0 languageName: unknown linkType: soft -"@noir-lang/noirc_abi@npm:0.22.0": - version: 0.22.0 - resolution: "@noir-lang/noirc_abi@npm:0.22.0" - checksum: a250c6cc5ca37fcf02663f8d6b027776f0e58920fb8f8a84efcf74f079f235bb11bbad682ba332211d9b9a79b6a3eb7faede7701cd88582b682971a41ca6212d - languageName: node - linkType: hard - "@noir-lang/noirc_abi@workspace:*, @noir-lang/noirc_abi@workspace:tooling/noirc_abi_wasm": version: 0.0.0-use.local resolution: "@noir-lang/noirc_abi@workspace:tooling/noirc_abi_wasm" dependencies: "@esm-bundle/chai": ^4.3.4-fix.0 + "@noir-lang/types": "workspace:*" "@web/dev-server-esbuild": ^0.3.6 "@web/test-runner": ^0.15.3 "@web/test-runner-playwright": ^0.10.0 - eslint: 
^8.50.0 + eslint: ^8.56.0 mocha: ^10.2.0 languageName: unknown linkType: soft @@ -4588,7 +4529,7 @@ __metadata: "@typescript-eslint/parser": ^6.7.3 chai: ^4.3.7 cspell: ^8.3.2 - eslint: ^8.50.0 + eslint: ^8.56.0 eslint-plugin-prettier: ^5.0.0 mocha: ^10.2.0 prettier: 3.0.3 @@ -4597,22 +4538,12 @@ __metadata: languageName: unknown linkType: soft -"@noir-lang/types@npm:0.22.0, @noir-lang/types@npm:^0.22.0": - version: 0.22.0 - resolution: "@noir-lang/types@npm:0.22.0" - dependencies: - "@noir-lang/noirc_abi": 0.22.0 - checksum: 5dd1badf0449c518e755172de1d2f2c1b95bfaf7b7328b7de00b8ce9ba68bd447ca65e827185da7d737e7e88dcaf296b29687ffe2e1f5b4d5cc31ce3e3b4f208 - languageName: node - linkType: hard - "@noir-lang/types@workspace:*, @noir-lang/types@workspace:tooling/noir_js_types": version: 0.0.0-use.local resolution: "@noir-lang/types@workspace:tooling/noir_js_types" dependencies: - "@noir-lang/noirc_abi": "workspace:*" "@types/prettier": ^3 - eslint: ^8.50.0 + eslint: ^8.56.0 eslint-plugin-prettier: ^5.0.0 prettier: 3.0.3 typescript: ^5.2.2 @@ -5326,26 +5257,6 @@ __metadata: languageName: node linkType: hard -"@signorecello/noir_playground@npm:^0.7.0": - version: 0.7.0 - resolution: "@signorecello/noir_playground@npm:0.7.0" - dependencies: - "@monaco-editor/react": 4.6.0 - "@noir-lang/backend_barretenberg": ^0.22.0 - "@noir-lang/noir_js": ^0.22.0 - "@noir-lang/noir_wasm": ^0.22.0 - "@noir-lang/types": ^0.22.0 - fflate: ^0.8.1 - js-base64: ^3.7.5 - monaco-editor: ^0.44.0 - monaco-editor-textmate: ^4.0.0 - monaco-textmate: ^3.0.1 - onigasm: ^2.2.5 - react-toastify: ^9.1.3 - checksum: 360bd1dbc8964a6ab8a6e8d0eb0cd11d7446cc23bf63c253083b18b5d6d5ccf2ec6ca847614106cd93490bb815aac651a6e4584ac63ea0fda182cdb1aadf3f45 - languageName: node - linkType: hard - "@sinclair/typebox@npm:^0.27.8": version: 0.27.8 resolution: "@sinclair/typebox@npm:0.27.8" @@ -5374,6 +5285,13 @@ __metadata: languageName: node linkType: hard +"@sindresorhus/merge-streams@npm:^2.1.0": + version: 2.1.0 + resolution: "@sindresorhus/merge-streams@npm:2.1.0" + checksum: 8aa91a3fca68d4ba78f81cad80f2dc280fa82b6c49c9fa5fe37438b6b9082cf993adb2309163f924bef9d7173b2fae6bb40fc4070a344cbab8bcc19eb1ee0b7c + languageName: node + linkType: hard + "@sinonjs/commons@npm:^2.0.0": version: 2.0.0 resolution: "@sinonjs/commons@npm:2.0.0" @@ -5728,7 +5646,7 @@ __metadata: languageName: node linkType: hard -"@types/bonjour@npm:^3.5.9": +"@types/bonjour@npm:^3.5.13, @types/bonjour@npm:^3.5.9": version: 3.5.13 resolution: "@types/bonjour@npm:3.5.13" dependencies: @@ -5770,7 +5688,7 @@ __metadata: languageName: node linkType: hard -"@types/connect-history-api-fallback@npm:^1.3.5": +"@types/connect-history-api-fallback@npm:^1.3.5, @types/connect-history-api-fallback@npm:^1.5.4": version: 1.5.4 resolution: "@types/connect-history-api-fallback@npm:1.5.4" dependencies: @@ -5860,7 +5778,7 @@ __metadata: languageName: node linkType: hard -"@types/estree@npm:*, @types/estree@npm:1.0.5, @types/estree@npm:^1.0.0": +"@types/estree@npm:*, @types/estree@npm:1.0.5, @types/estree@npm:^1.0.0, @types/estree@npm:^1.0.5": version: 1.0.5 resolution: "@types/estree@npm:1.0.5" checksum: dd8b5bed28e6213b7acd0fb665a84e693554d850b0df423ac8076cc3ad5823a6bc26b0251d080bdc545af83179ede51dd3f6fa78cad2c46ed1f29624ddf3e41a @@ -5886,7 +5804,7 @@ __metadata: languageName: node linkType: hard -"@types/express@npm:*, @types/express@npm:^4.17.13": +"@types/express@npm:*, @types/express@npm:^4.17.13, @types/express@npm:^4.17.21": version: 4.17.21 resolution: "@types/express@npm:4.17.21" 
dependencies: @@ -6340,6 +6258,13 @@ __metadata: languageName: node linkType: hard +"@types/retry@npm:0.12.2": + version: 0.12.2 + resolution: "@types/retry@npm:0.12.2" + checksum: e5675035717b39ce4f42f339657cae9637cf0c0051cf54314a6a2c44d38d91f6544be9ddc0280587789b6afd056be5d99dbe3e9f4df68c286c36321579b1bf4a + languageName: node + linkType: hard + "@types/sax@npm:^1.2.1": version: 1.2.7 resolution: "@types/sax@npm:1.2.7" @@ -6382,7 +6307,7 @@ __metadata: languageName: node linkType: hard -"@types/serve-index@npm:^1.9.1": +"@types/serve-index@npm:^1.9.1, @types/serve-index@npm:^1.9.4": version: 1.9.4 resolution: "@types/serve-index@npm:1.9.4" dependencies: @@ -6391,7 +6316,7 @@ __metadata: languageName: node linkType: hard -"@types/serve-static@npm:*, @types/serve-static@npm:^1.13.10": +"@types/serve-static@npm:*, @types/serve-static@npm:^1.13.10, @types/serve-static@npm:^1.15.5": version: 1.15.5 resolution: "@types/serve-static@npm:1.15.5" dependencies: @@ -6418,7 +6343,7 @@ __metadata: languageName: node linkType: hard -"@types/sockjs@npm:^0.3.33": +"@types/sockjs@npm:^0.3.33, @types/sockjs@npm:^0.3.36": version: 0.3.36 resolution: "@types/sockjs@npm:0.3.36" dependencies: @@ -6450,7 +6375,7 @@ __metadata: languageName: node linkType: hard -"@types/ws@npm:^8.5.5": +"@types/ws@npm:^8.5.10, @types/ws@npm:^8.5.5": version: 8.5.10 resolution: "@types/ws@npm:8.5.10" dependencies: @@ -6759,6 +6684,20 @@ __metadata: languageName: node linkType: hard +"@web/dev-server-import-maps@npm:^0.2.0": + version: 0.2.0 + resolution: "@web/dev-server-import-maps@npm:0.2.0" + dependencies: + "@import-maps/resolve": ^1.0.1 + "@types/parse5": ^6.0.1 + "@web/dev-server-core": ^0.7.0 + "@web/parse5-utils": ^2.1.0 + parse5: ^6.0.1 + picomatch: ^2.2.2 + checksum: 15dabfa385f023bab70758b80cc09443455830799793c1a404a7230d90ebf60e40984a10d8a6ceea2afb8f057e90a9f7356a76f867d5e5a2eeacbc397e41535a + languageName: node + linkType: hard + "@web/dev-server-rollup@npm:^0.4.1": version: 0.4.1 resolution: "@web/dev-server-rollup@npm:0.4.1" @@ -7288,36 +7227,36 @@ __metadata: languageName: node linkType: hard -"@webpack-cli/configtest@npm:^1.2.0": - version: 1.2.0 - resolution: "@webpack-cli/configtest@npm:1.2.0" +"@webpack-cli/configtest@npm:^2.1.1": + version: 2.1.1 + resolution: "@webpack-cli/configtest@npm:2.1.1" peerDependencies: - webpack: 4.x.x || 5.x.x - webpack-cli: 4.x.x - checksum: a2726cd9ec601d2b57e5fc15e0ebf5200a8892065e735911269ac2038e62be4bfc176ea1f88c2c46ff09b4d05d4c10ae045e87b3679372483d47da625a327e28 + webpack: 5.x.x + webpack-cli: 5.x.x + checksum: 9f9f9145c2d05471fc83d426db1df85cf49f329836b0c4b9f46b6948bed4b013464c00622b136d2a0a26993ce2306976682592245b08ee717500b1db45009a72 languageName: node linkType: hard -"@webpack-cli/info@npm:^1.5.0": - version: 1.5.0 - resolution: "@webpack-cli/info@npm:1.5.0" - dependencies: - envinfo: ^7.7.3 +"@webpack-cli/info@npm:^2.0.2": + version: 2.0.2 + resolution: "@webpack-cli/info@npm:2.0.2" peerDependencies: - webpack-cli: 4.x.x - checksum: 7f56fe037cd7d1fd5c7428588519fbf04a0cad33925ee4202ffbafd00f8ec1f2f67d991245e687d50e0f3e23f7b7814273d56cb9f7da4b05eed47c8d815c6296 + webpack: 5.x.x + webpack-cli: 5.x.x + checksum: 8f9a178afca5c82e113aed1efa552d64ee5ae4fdff63fe747c096a981ec74f18a5d07bd6e89bbe6715c3e57d96eea024a410e58977169489fe1df044c10dd94e languageName: node linkType: hard -"@webpack-cli/serve@npm:^1.7.0": - version: 1.7.0 - resolution: "@webpack-cli/serve@npm:1.7.0" +"@webpack-cli/serve@npm:^2.0.5": + version: 2.0.5 + resolution: "@webpack-cli/serve@npm:2.0.5" 
peerDependencies: - webpack-cli: 4.x.x + webpack: 5.x.x + webpack-cli: 5.x.x peerDependenciesMeta: webpack-dev-server: optional: true - checksum: d475e8effa23eb7ff9a48b14d4de425989fd82f906ce71c210921cc3852327c22873be00c35e181a25a6bd03d424ae2b83e7f3b3f410ac7ee31b128ab4ac7713 + checksum: 75f0e54681796d567a71ac3e2781d2901a8d8cf1cdfc82f261034dddac59a8343e8c3bc5e32b4bb9d6766759ba49fb29a5cd86ef1701d79c506fe886bb63ac75 languageName: node linkType: hard @@ -8157,6 +8096,16 @@ __metadata: languageName: node linkType: hard +"bonjour-service@npm:^1.2.1": + version: 1.2.1 + resolution: "bonjour-service@npm:1.2.1" + dependencies: + fast-deep-equal: ^3.1.3 + multicast-dns: ^7.2.5 + checksum: b65b3e6e3a07e97f2da5806afb76f3946d5a6426b72e849a0236dc3c9d3612fb8c5359ebade4be7eb63f74a37670c53a53be2ff17f4f709811fda77f600eb25b + languageName: node + linkType: hard + "boolbase@npm:^1.0.0": version: 1.0.0 resolution: "boolbase@npm:1.0.0" @@ -8407,6 +8356,15 @@ __metadata: languageName: node linkType: hard +"bundle-name@npm:^4.1.0": + version: 4.1.0 + resolution: "bundle-name@npm:4.1.0" + dependencies: + run-applescript: ^7.0.0 + checksum: 1d966c8d2dbf4d9d394e53b724ac756c2414c45c01340b37743621f59cc565a435024b394ddcb62b9b335d1c9a31f4640eb648c3fec7f97ee74dc0694c9beb6c + languageName: node + linkType: hard + "bytes@npm:3.0.0": version: 3.0.0 resolution: "bytes@npm:3.0.0" @@ -8776,6 +8734,25 @@ __metadata: languageName: node linkType: hard +"chokidar@npm:^3.6.0": + version: 3.6.0 + resolution: "chokidar@npm:3.6.0" + dependencies: + anymatch: ~3.1.2 + braces: ~3.0.2 + fsevents: ~2.3.2 + glob-parent: ~5.1.2 + is-binary-path: ~2.1.0 + is-glob: ~4.0.1 + normalize-path: ~3.0.0 + readdirp: ~3.6.0 + dependenciesMeta: + fsevents: + optional: true + checksum: d2f29f499705dcd4f6f3bbed79a9ce2388cf530460122eed3b9c48efeab7a4e28739c6551fd15bec9245c6b9eeca7a32baa64694d64d9b6faeb74ddb8c4a413d + languageName: node + linkType: hard + "chownr@npm:^1.1.1": version: 1.1.4 resolution: "chownr@npm:1.1.4" @@ -9208,7 +9185,7 @@ __metadata: languageName: node linkType: hard -"commander@npm:^7.0.0, commander@npm:^7.2.0": +"commander@npm:^7.2.0": version: 7.2.0 resolution: "commander@npm:7.2.0" checksum: 53501cbeee61d5157546c0bef0fedb6cdfc763a882136284bed9a07225f09a14b82d2a84e7637edfd1a679fb35ed9502fd58ef1d091e6287f60d790147f68ddc @@ -9434,6 +9411,22 @@ __metadata: languageName: node linkType: hard +"copy-webpack-plugin@npm:^12.0.2": + version: 12.0.2 + resolution: "copy-webpack-plugin@npm:12.0.2" + dependencies: + fast-glob: ^3.3.2 + glob-parent: ^6.0.1 + globby: ^14.0.0 + normalize-path: ^3.0.0 + schema-utils: ^4.2.0 + serialize-javascript: ^6.0.2 + peerDependencies: + webpack: ^5.1.0 + checksum: 98127735336c6db5924688486d3a1854a41835963d0c0b81695b2e3d58c6675164be7d23dee7090b84a56d3c9923175d3d0863ac1942bcc3317d2efc1962b927 + languageName: node + linkType: hard + "core-js-compat@npm:^3.31.0, core-js-compat@npm:^3.33.1": version: 3.34.0 resolution: "core-js-compat@npm:3.34.0" @@ -10056,6 +10049,13 @@ __metadata: languageName: node linkType: hard +"default-browser-id@npm:^5.0.0": + version: 5.0.0 + resolution: "default-browser-id@npm:5.0.0" + checksum: 185bfaecec2c75fa423544af722a3469b20704c8d1942794a86e4364fe7d9e8e9f63241a5b769d61c8151993bc65833a5b959026fa1ccea343b3db0a33aa6deb + languageName: node + linkType: hard + "default-browser@npm:^4.0.0": version: 4.0.0 resolution: "default-browser@npm:4.0.0" @@ -10068,6 +10068,16 @@ __metadata: languageName: node linkType: hard +"default-browser@npm:^5.2.1": + version: 5.2.1 + resolution: 
"default-browser@npm:5.2.1" + dependencies: + bundle-name: ^4.1.0 + default-browser-id: ^5.0.0 + checksum: afab7eff7b7f5f7a94d9114d1ec67273d3fbc539edf8c0f80019879d53aa71e867303c6f6d7cffeb10a6f3cfb59d4f963dba3f9c96830b4540cc7339a1bf9840 + languageName: node + linkType: hard + "default-gateway@npm:^6.0.3": version: 6.0.3 resolution: "default-gateway@npm:6.0.3" @@ -10337,7 +10347,6 @@ __metadata: "@noir-lang/noir_js": "workspace:*" "@noir-lang/noirc_abi": "workspace:*" "@noir-lang/types": "workspace:*" - "@signorecello/noir_playground": ^0.7.0 "@types/prettier": ^3 axios: ^1.4.0 clsx: ^1.2.1 @@ -10962,14 +10971,14 @@ __metadata: languageName: node linkType: hard -"eslint@npm:^8.50.0": - version: 8.55.0 - resolution: "eslint@npm:8.55.0" +"eslint@npm:^8.56.0": + version: 8.56.0 + resolution: "eslint@npm:8.56.0" dependencies: "@eslint-community/eslint-utils": ^4.2.0 "@eslint-community/regexpp": ^4.6.1 "@eslint/eslintrc": ^2.1.4 - "@eslint/js": 8.55.0 + "@eslint/js": 8.56.0 "@humanwhocodes/config-array": ^0.11.13 "@humanwhocodes/module-importer": ^1.0.1 "@nodelib/fs.walk": ^1.2.8 @@ -11006,7 +11015,7 @@ __metadata: text-table: ^0.2.0 bin: eslint: bin/eslint.js - checksum: 83f82a604559dc1faae79d28fdf3dfc9e592ca221052e2ea516e1b379b37e77e4597705a16880e2f5ece4f79087c1dd13fd7f6e9746f794a401175519db18b41 + checksum: 883436d1e809b4a25d9eb03d42f584b84c408dbac28b0019f6ea07b5177940bf3cca86208f749a6a1e0039b63e085ee47aca1236c30721e91f0deef5cc5a5136 languageName: node linkType: hard @@ -11499,13 +11508,6 @@ __metadata: languageName: node linkType: hard -"fast-plist@npm:^0.1.2": - version: 0.1.3 - resolution: "fast-plist@npm:0.1.3" - checksum: e879f548db3a1fc89c654c476e9c9846f4335fdcd2283ec99e5f234c897f5616cee2a0a4201bf4b64ab6269e75c09daafc3933bd4a038c85af943fac0f113caa - languageName: node - linkType: hard - "fast-url-parser@npm:1.1.3": version: 1.1.3 resolution: "fast-url-parser@npm:1.1.3" @@ -11567,7 +11569,7 @@ __metadata: languageName: node linkType: hard -"fflate@npm:^0.8.0, fflate@npm:^0.8.1": +"fflate@npm:^0.8.0": version: 0.8.1 resolution: "fflate@npm:0.8.1" checksum: 7207e2d333243724485d2488095256b776184bd4545aa9967b655feaee5dc18e9525ed9b6d75f94cfd71d98fb285336f4902641683472f1d0c19a99137084cec @@ -12317,6 +12319,20 @@ __metadata: languageName: node linkType: hard +"globby@npm:^14.0.0": + version: 14.0.1 + resolution: "globby@npm:14.0.1" + dependencies: + "@sindresorhus/merge-streams": ^2.1.0 + fast-glob: ^3.3.2 + ignore: ^5.2.4 + path-type: ^5.0.0 + slash: ^5.1.0 + unicorn-magic: ^0.1.0 + checksum: 33568444289afb1135ad62d52d5e8412900cec620e3b6ece533afa46d004066f14b97052b643833d7cf4ee03e7fac571430130cde44c333df91a45d313105170 + languageName: node + linkType: hard + "gopd@npm:^1.0.1": version: 1.0.1 resolution: "gopd@npm:1.0.1" @@ -12906,7 +12922,7 @@ __metadata: languageName: node linkType: hard -"html-entities@npm:^2.3.2": +"html-entities@npm:^2.3.2, html-entities@npm:^2.4.0": version: 2.4.0 resolution: "html-entities@npm:2.4.0" checksum: 25bea32642ce9ebd0eedc4d24381883ecb0335ccb8ac26379a0958b9b16652fdbaa725d70207ce54a51db24103436a698a8e454397d3ba8ad81460224751f1dc @@ -12975,7 +12991,7 @@ __metadata: languageName: node linkType: hard -"html-webpack-plugin@npm:^5.5.0, html-webpack-plugin@npm:^5.5.3, html-webpack-plugin@npm:^5.5.4": +"html-webpack-plugin@npm:^5.5.0, html-webpack-plugin@npm:^5.5.3": version: 5.5.4 resolution: "html-webpack-plugin@npm:5.5.4" dependencies: @@ -12990,6 +13006,27 @@ __metadata: languageName: node linkType: hard +"html-webpack-plugin@npm:^5.6.0": + version: 5.6.0 + 
resolution: "html-webpack-plugin@npm:5.6.0" + dependencies: + "@types/html-minifier-terser": ^6.0.0 + html-minifier-terser: ^6.0.2 + lodash: ^4.17.21 + pretty-error: ^4.0.0 + tapable: ^2.0.0 + peerDependencies: + "@rspack/core": 0.x || 1.x + webpack: ^5.20.0 + peerDependenciesMeta: + "@rspack/core": + optional: true + webpack: + optional: true + checksum: 32a6e41da538e798fd0be476637d7611a5e8a98a3508f031996e9eb27804dcdc282cb01f847cf5d066f21b49cfb8e21627fcf977ffd0c9bea81cf80e5a65070d + languageName: node + linkType: hard + "htmlparser2@npm:^6.1.0": version: 6.1.0 resolution: "htmlparser2@npm:6.1.0" @@ -13400,9 +13437,10 @@ __metadata: "@nomicfoundation/hardhat-chai-matchers": ^2.0.0 "@nomicfoundation/hardhat-ethers": ^3.0.0 "@web/dev-server-esbuild": ^0.3.6 + "@web/dev-server-import-maps": ^0.2.0 "@web/test-runner": ^0.15.3 "@web/test-runner-playwright": ^0.10.0 - eslint: ^8.50.0 + eslint: ^8.56.0 eslint-plugin-prettier: ^5.0.0 ethers: ^6.7.1 hardhat: ^2.17.4 @@ -13420,10 +13458,10 @@ __metadata: languageName: node linkType: hard -"interpret@npm:^2.2.0": - version: 2.2.0 - resolution: "interpret@npm:2.2.0" - checksum: f51efef7cb8d02da16408ffa3504cd6053014c5aeb7bb8c223727e053e4235bf565e45d67028b0c8740d917c603807aa3c27d7bd2f21bf20b6417e2bb3e5fd6e +"interpret@npm:^3.1.1": + version: 3.1.1 + resolution: "interpret@npm:3.1.1" + checksum: 35cebcf48c7351130437596d9ab8c8fe131ce4038da4561e6d665f25640e0034702a031cf7e3a5cea60ac7ac548bf17465e0571ede126f3d3a6933152171ac82 languageName: node linkType: hard @@ -13466,7 +13504,7 @@ __metadata: languageName: node linkType: hard -"ipaddr.js@npm:^2.0.1": +"ipaddr.js@npm:^2.0.1, ipaddr.js@npm:^2.1.0": version: 2.1.0 resolution: "ipaddr.js@npm:2.1.0" checksum: 807a054f2bd720c4d97ee479d6c9e865c233bea21f139fb8dabd5a35c4226d2621c42e07b4ad94ff3f82add926a607d8d9d37c625ad0319f0e08f9f2bd1968e2 @@ -13724,6 +13762,13 @@ __metadata: languageName: node linkType: hard +"is-network-error@npm:^1.0.0": + version: 1.0.1 + resolution: "is-network-error@npm:1.0.1" + checksum: 165d61500c4186c62db5a3a693d6bfa14ca40fe9b471ef4cd4f27b20ef6760880faf5386dc01ca9867531631782941fedaa94521d09959edf71f046e393c7b91 + languageName: node + linkType: hard + "is-npm@npm:^5.0.0": version: 5.0.0 resolution: "is-npm@npm:5.0.0" @@ -13907,6 +13952,15 @@ __metadata: languageName: node linkType: hard +"is-wsl@npm:^3.1.0": + version: 3.1.0 + resolution: "is-wsl@npm:3.1.0" + dependencies: + is-inside-container: ^1.0.0 + checksum: f9734c81f2f9cf9877c5db8356bfe1ff61680f1f4c1011e91278a9c0564b395ae796addb4bf33956871041476ec82c3e5260ed57b22ac91794d4ae70a1d2f0a9 + languageName: node + linkType: hard + "is-yarn-global@npm:^0.3.0": version: 0.3.0 resolution: "is-yarn-global@npm:0.3.0" @@ -14077,13 +14131,6 @@ __metadata: languageName: node linkType: hard -"js-base64@npm:^3.7.5": - version: 3.7.5 - resolution: "js-base64@npm:3.7.5" - checksum: 67a78c8b1c47b73f1c6fba1957e9fe6fd9dc78ac93ac46cc2e43472dcb9cf150d126fb0e593192e88e0497354fa634d17d255add7cc6ee3c7b4d29870faa8e18 - languageName: node - linkType: hard - "js-sdsl@npm:^4.1.4": version: 4.4.2 resolution: "js-sdsl@npm:4.4.2" @@ -14452,7 +14499,7 @@ __metadata: languageName: node linkType: hard -"launch-editor@npm:^2.6.0": +"launch-editor@npm:^2.6.0, launch-editor@npm:^2.6.1": version: 2.6.1 resolution: "launch-editor@npm:2.6.1" dependencies: @@ -16249,34 +16296,6 @@ __metadata: languageName: node linkType: hard -"monaco-editor-textmate@npm:^4.0.0": - version: 4.0.0 - resolution: "monaco-editor-textmate@npm:4.0.0" - peerDependencies: - monaco-editor: 0.x.x - 
monaco-textmate: ^3.0.0 - checksum: 9d3f5f24982f928c5f4b7c5c5170a549cb19eb1471eb157aa07bd97cf15cd75dd941585eeb6924b05c54109a55d48adc45191eda9db5d2793b1d8c462181c100 - languageName: node - linkType: hard - -"monaco-editor@npm:^0.44.0": - version: 0.44.0 - resolution: "monaco-editor@npm:0.44.0" - checksum: 6e561b23e5e9090cbdbb820dae5895a8bf9d537acc09281756a8c428960da0481461c72f387cc9a2e14bff69ab4359186c98df2dd29d6d109f1ab7189b573a35 - languageName: node - linkType: hard - -"monaco-textmate@npm:^3.0.1": - version: 3.0.1 - resolution: "monaco-textmate@npm:3.0.1" - dependencies: - fast-plist: ^0.1.2 - peerDependencies: - onigasm: ^2.0.0 - checksum: 0f2ec07ee3e9a37bb880e2aaef802f82cb666660b40fc3c7c3e35553d740aed34ae94399b06296fddf1f96efdaf27eaf347b39cb14bc08ccfb65162c52771d56 - languageName: node - linkType: hard - "mrmime@npm:^1.0.0": version: 1.0.1 resolution: "mrmime@npm:1.0.1" @@ -16708,15 +16727,6 @@ __metadata: languageName: node linkType: hard -"onigasm@npm:^2.2.5": - version: 2.2.5 - resolution: "onigasm@npm:2.2.5" - dependencies: - lru-cache: ^5.1.1 - checksum: 97aedde610ef561f05853609d6a5b720ec1e123f867bdac1b38b5aeb3bc90ed60209678c75a5f0f9821aa793c720b6d17aabfb956e26ab115ee9b81d6e56bdf7 - languageName: node - linkType: hard - "only@npm:~0.0.2": version: 0.0.2 resolution: "only@npm:0.0.2" @@ -16724,6 +16734,18 @@ __metadata: languageName: node linkType: hard +"open@npm:^10.0.3": + version: 10.0.3 + resolution: "open@npm:10.0.3" + dependencies: + default-browser: ^5.2.1 + define-lazy-prop: ^3.0.0 + is-inside-container: ^1.0.0 + is-wsl: ^3.1.0 + checksum: 3c4b4eb3c08210f7b7b3f3311d36440f4b83f0641ac70e5e56d637f48d4a7736e0fd49a604eebe0a55c51223d77f9ced11912223cab12d5e9fdc866727c6cb1d + languageName: node + linkType: hard + "open@npm:^8.0.2, open@npm:^8.0.9, open@npm:^8.4.0": version: 8.4.2 resolution: "open@npm:8.4.2" @@ -16907,6 +16929,17 @@ __metadata: languageName: node linkType: hard +"p-retry@npm:^6.2.0": + version: 6.2.0 + resolution: "p-retry@npm:6.2.0" + dependencies: + "@types/retry": 0.12.2 + is-network-error: ^1.0.0 + retry: ^0.13.1 + checksum: 6003573c559ee812329c9c3ede7ba12a783fdc8dd70602116646e850c920b4597dc502fe001c3f9526fca4e93275045db7a27341c458e51db179c1374a01ac44 + languageName: node + linkType: hard + "p-try@npm:^1.0.0": version: 1.0.0 resolution: "p-try@npm:1.0.0" @@ -17202,6 +17235,13 @@ __metadata: languageName: node linkType: hard +"path-type@npm:^5.0.0": + version: 5.0.0 + resolution: "path-type@npm:5.0.0" + checksum: 15ec24050e8932c2c98d085b72cfa0d6b4eeb4cbde151a0a05726d8afae85784fc5544f733d8dfc68536587d5143d29c0bd793623fad03d7e61cc00067291cd5 + languageName: node + linkType: hard + "pathval@npm:^1.1.1": version: 1.1.1 resolution: "pathval@npm:1.1.1" @@ -18335,18 +18375,6 @@ __metadata: languageName: node linkType: hard -"react-toastify@npm:^9.1.3": - version: 9.1.3 - resolution: "react-toastify@npm:9.1.3" - dependencies: - clsx: ^1.1.1 - peerDependencies: - react: ">=16" - react-dom: ">=16" - checksum: e8bd92c5cbf831b43a042644ab9bc69abe6ceb3ce91ba71f5cd2d8b6a2c9885ca52770e1f1ba64c5632607f6df962db344a26c7fba57606faf5aa0e7bfc8535f - languageName: node - linkType: hard - "react@npm:^18.2.0": version: 18.2.0 resolution: "react@npm:18.2.0" @@ -18444,12 +18472,12 @@ __metadata: languageName: node linkType: hard -"rechoir@npm:^0.7.0": - version: 0.7.1 - resolution: "rechoir@npm:0.7.1" +"rechoir@npm:^0.8.0": + version: 0.8.0 + resolution: "rechoir@npm:0.8.0" dependencies: - resolve: ^1.9.0 - checksum: 
2a04aab4e28c05fcd6ee6768446bc8b859d8f108e71fc7f5bcbc5ef25e53330ce2c11d10f82a24591a2df4c49c4f61feabe1fd11f844c66feedd4cd7bb61146a + resolve: ^1.20.0 + checksum: ad3caed8afdefbc33fbc30e6d22b86c35b3d51c2005546f4e79bcc03c074df804b3640ad18945e6bef9ed12caedc035655ec1082f64a5e94c849ff939dc0a788 languageName: node linkType: hard @@ -18901,7 +18929,7 @@ __metadata: languageName: node linkType: hard -"resolve@npm:^1.1.6, resolve@npm:^1.14.2, resolve@npm:^1.19.0, resolve@npm:^1.22.1, resolve@npm:^1.3.2, resolve@npm:^1.9.0": +"resolve@npm:^1.1.6, resolve@npm:^1.14.2, resolve@npm:^1.19.0, resolve@npm:^1.20.0, resolve@npm:^1.22.1, resolve@npm:^1.3.2": version: 1.22.8 resolution: "resolve@npm:1.22.8" dependencies: @@ -18923,7 +18951,7 @@ __metadata: languageName: node linkType: hard -"resolve@patch:resolve@^1.1.6#~builtin, resolve@patch:resolve@^1.14.2#~builtin, resolve@patch:resolve@^1.19.0#~builtin, resolve@patch:resolve@^1.22.1#~builtin, resolve@patch:resolve@^1.3.2#~builtin, resolve@patch:resolve@^1.9.0#~builtin": +"resolve@patch:resolve@^1.1.6#~builtin, resolve@patch:resolve@^1.14.2#~builtin, resolve@patch:resolve@^1.19.0#~builtin, resolve@patch:resolve@^1.20.0#~builtin, resolve@patch:resolve@^1.22.1#~builtin, resolve@patch:resolve@^1.3.2#~builtin": version: 1.22.8 resolution: "resolve@patch:resolve@npm%3A1.22.8#~builtin::version=1.22.8&hash=c3c19d" dependencies: @@ -19137,6 +19165,13 @@ __metadata: languageName: node linkType: hard +"run-applescript@npm:^7.0.0": + version: 7.0.0 + resolution: "run-applescript@npm:7.0.0" + checksum: b02462454d8b182ad4117e5d4626e9e6782eb2072925c9fac582170b0627ae3c1ea92ee9b2df7daf84b5e9ffe14eb1cf5fb70bc44b15c8a0bfcdb47987e2410c + languageName: node + linkType: hard + "run-parallel-limit@npm:^1.1.0": version: 1.1.0 resolution: "run-parallel-limit@npm:1.1.0" @@ -19241,7 +19276,7 @@ __metadata: languageName: node linkType: hard -"schema-utils@npm:^4.0.0": +"schema-utils@npm:^4.0.0, schema-utils@npm:^4.2.0": version: 4.2.0 resolution: "schema-utils@npm:4.2.0" dependencies: @@ -19289,7 +19324,7 @@ __metadata: languageName: node linkType: hard -"selfsigned@npm:^2.1.1": +"selfsigned@npm:^2.1.1, selfsigned@npm:^2.4.1": version: 2.4.1 resolution: "selfsigned@npm:2.4.1" dependencies: @@ -19394,6 +19429,15 @@ __metadata: languageName: node linkType: hard +"serialize-javascript@npm:^6.0.2": + version: 6.0.2 + resolution: "serialize-javascript@npm:6.0.2" + dependencies: + randombytes: ^2.1.0 + checksum: c4839c6206c1d143c0f80763997a361310305751171dd95e4b57efee69b8f6edd8960a0b7fbfc45042aadff98b206d55428aee0dc276efe54f100899c7fa8ab7 + languageName: node + linkType: hard + "serve-handler@npm:6.1.5, serve-handler@npm:^6.1.3, serve-handler@npm:^6.1.5": version: 6.1.5 resolution: "serve-handler@npm:6.1.5" @@ -19661,6 +19705,13 @@ __metadata: languageName: node linkType: hard +"slash@npm:^5.1.0": + version: 5.1.0 + resolution: "slash@npm:5.1.0" + checksum: 70434b34c50eb21b741d37d455110258c42d2cf18c01e6518aeb7299f3c6e626330c889c0c552b5ca2ef54a8f5a74213ab48895f0640717cacefeef6830a1ba4 + languageName: node + linkType: hard + "slice-ansi@npm:^4.0.0": version: 4.0.0 resolution: "slice-ansi@npm:4.0.0" @@ -19869,13 +19920,6 @@ __metadata: languageName: node linkType: hard -"state-local@npm:^1.0.6": - version: 1.0.7 - resolution: "state-local@npm:1.0.7" - checksum: d1afcf1429e7e6eb08685b3a94be8797db847369316d4776fd51f3962b15b984dacc7f8e401ad20968e5798c9565b4b377afedf4e4c4d60fe7495e1cbe14a251 - languageName: node - linkType: hard - "state-toggle@npm:^1.0.0": version: 1.0.3 resolution: 
"state-toggle@npm:1.0.3" @@ -20292,6 +20336,28 @@ __metadata: languageName: node linkType: hard +"terser-webpack-plugin@npm:^5.3.10": + version: 5.3.10 + resolution: "terser-webpack-plugin@npm:5.3.10" + dependencies: + "@jridgewell/trace-mapping": ^0.3.20 + jest-worker: ^27.4.5 + schema-utils: ^3.1.1 + serialize-javascript: ^6.0.1 + terser: ^5.26.0 + peerDependencies: + webpack: ^5.1.0 + peerDependenciesMeta: + "@swc/core": + optional: true + esbuild: + optional: true + uglify-js: + optional: true + checksum: bd6e7596cf815f3353e2a53e79cbdec959a1b0276f5e5d4e63e9d7c3c5bb5306df567729da287d1c7b39d79093e56863c569c42c6c24cc34c76aa313bd2cbcea + languageName: node + linkType: hard + "terser-webpack-plugin@npm:^5.3.3, terser-webpack-plugin@npm:^5.3.7, terser-webpack-plugin@npm:^5.3.9": version: 5.3.9 resolution: "terser-webpack-plugin@npm:5.3.9" @@ -20328,6 +20394,20 @@ __metadata: languageName: node linkType: hard +"terser@npm:^5.26.0": + version: 5.27.0 + resolution: "terser@npm:5.27.0" + dependencies: + "@jridgewell/source-map": ^0.3.3 + acorn: ^8.8.2 + commander: ^2.20.0 + source-map-support: ~0.5.20 + bin: + terser: bin/terser + checksum: c165052cfea061e8512e9b9ba42a098c2ff6382886ae122b040fd5b6153443070cc2dcb4862269f1669c09c716763e856125a355ff984aa72be525d6fffd8729 + languageName: node + linkType: hard + "text-table@npm:^0.2.0": version: 0.2.0 resolution: "text-table@npm:0.2.0" @@ -20943,6 +21023,13 @@ __metadata: languageName: node linkType: hard +"unicorn-magic@npm:^0.1.0": + version: 0.1.0 + resolution: "unicorn-magic@npm:0.1.0" + checksum: 48c5882ca3378f380318c0b4eb1d73b7e3c5b728859b060276e0a490051d4180966beeb48962d850fd0c6816543bcdfc28629dcd030bb62a286a2ae2acb5acb6 + languageName: node + linkType: hard + "unified@npm:9.2.0": version: 9.2.0 resolution: "unified@npm:9.2.0" @@ -21587,36 +21674,35 @@ __metadata: languageName: node linkType: hard -"webpack-cli@npm:^4.7.2": - version: 4.10.0 - resolution: "webpack-cli@npm:4.10.0" +"webpack-cli@npm:^5.1.4": + version: 5.1.4 + resolution: "webpack-cli@npm:5.1.4" dependencies: "@discoveryjs/json-ext": ^0.5.0 - "@webpack-cli/configtest": ^1.2.0 - "@webpack-cli/info": ^1.5.0 - "@webpack-cli/serve": ^1.7.0 + "@webpack-cli/configtest": ^2.1.1 + "@webpack-cli/info": ^2.0.2 + "@webpack-cli/serve": ^2.0.5 colorette: ^2.0.14 - commander: ^7.0.0 + commander: ^10.0.1 cross-spawn: ^7.0.3 + envinfo: ^7.7.3 fastest-levenshtein: ^1.0.12 import-local: ^3.0.2 - interpret: ^2.2.0 - rechoir: ^0.7.0 + interpret: ^3.1.1 + rechoir: ^0.8.0 webpack-merge: ^5.7.3 peerDependencies: - webpack: 4.x.x || 5.x.x + webpack: 5.x.x peerDependenciesMeta: "@webpack-cli/generators": optional: true - "@webpack-cli/migrate": - optional: true webpack-bundle-analyzer: optional: true webpack-dev-server: optional: true bin: webpack-cli: bin/cli.js - checksum: 2ff5355ac348e6b40f2630a203b981728834dca96d6d621be96249764b2d0fc01dd54edfcc37f02214d02935de2cf0eefd6ce689d970d154ef493f01ba922390 + checksum: 3a4ad0d0342a6815c850ee4633cc2a8a5dae04f918e7847f180bf24ab400803cf8a8943707ffbed03eb20fe6ce647f996f60a2aade87b0b4a9954da3da172ce0 languageName: node linkType: hard @@ -21635,6 +21721,24 @@ __metadata: languageName: node linkType: hard +"webpack-dev-middleware@npm:^7.0.0": + version: 7.0.0 + resolution: "webpack-dev-middleware@npm:7.0.0" + dependencies: + colorette: ^2.0.10 + memfs: ^4.6.0 + mime-types: ^2.1.31 + range-parser: ^1.2.1 + schema-utils: ^4.0.0 + peerDependencies: + webpack: ^5.0.0 + peerDependenciesMeta: + webpack: + optional: true + checksum: 
90f6c87c80bd5849c34f3a1761ac7dc1b123def2e6e9922f55102ff4b7532538641fa8c7169ce8254b0d471c27d882cdf4a1c32979952474fc8eacc8b3447915 + languageName: node + linkType: hard + "webpack-dev-server@npm:^4.15.1, webpack-dev-server@npm:^4.9.3": version: 4.15.1 resolution: "webpack-dev-server@npm:4.15.1" @@ -21682,6 +21786,53 @@ __metadata: languageName: node linkType: hard +"webpack-dev-server@npm:^5.0.0": + version: 5.0.0 + resolution: "webpack-dev-server@npm:5.0.0" + dependencies: + "@types/bonjour": ^3.5.13 + "@types/connect-history-api-fallback": ^1.5.4 + "@types/express": ^4.17.21 + "@types/serve-index": ^1.9.4 + "@types/serve-static": ^1.15.5 + "@types/sockjs": ^0.3.36 + "@types/ws": ^8.5.10 + ansi-html-community: ^0.0.8 + bonjour-service: ^1.2.1 + chokidar: ^3.6.0 + colorette: ^2.0.10 + compression: ^1.7.4 + connect-history-api-fallback: ^2.0.0 + default-gateway: ^6.0.3 + express: ^4.17.3 + graceful-fs: ^4.2.6 + html-entities: ^2.4.0 + http-proxy-middleware: ^2.0.3 + ipaddr.js: ^2.1.0 + launch-editor: ^2.6.1 + open: ^10.0.3 + p-retry: ^6.2.0 + rimraf: ^5.0.5 + schema-utils: ^4.2.0 + selfsigned: ^2.4.1 + serve-index: ^1.9.1 + sockjs: ^0.3.24 + spdy: ^4.0.2 + webpack-dev-middleware: ^7.0.0 + ws: ^8.16.0 + peerDependencies: + webpack: ^5.0.0 + peerDependenciesMeta: + webpack: + optional: true + webpack-cli: + optional: true + bin: + webpack-dev-server: bin/webpack-dev-server.js + checksum: 419d1af6b6164900fb01168c3ef965fe8d27a78939ef8f5c602f82af5be8a2b68a0b015df564623dd69996d5265c679202c5970b59797e83cf322e47bbcd6022 + languageName: node + linkType: hard + "webpack-merge@npm:^5.7.3, webpack-merge@npm:^5.8.0, webpack-merge@npm:^5.9.0": version: 5.10.0 resolution: "webpack-merge@npm:5.10.0" @@ -21700,7 +21851,7 @@ __metadata: languageName: node linkType: hard -"webpack@npm:^5.49.0, webpack@npm:^5.73.0, webpack@npm:^5.88.1": +"webpack@npm:^5.73.0, webpack@npm:^5.88.1": version: 5.89.0 resolution: "webpack@npm:5.89.0" dependencies: @@ -21737,6 +21888,43 @@ __metadata: languageName: node linkType: hard +"webpack@npm:^5.90.1": + version: 5.90.1 + resolution: "webpack@npm:5.90.1" + dependencies: + "@types/eslint-scope": ^3.7.3 + "@types/estree": ^1.0.5 + "@webassemblyjs/ast": ^1.11.5 + "@webassemblyjs/wasm-edit": ^1.11.5 + "@webassemblyjs/wasm-parser": ^1.11.5 + acorn: ^8.7.1 + acorn-import-assertions: ^1.9.0 + browserslist: ^4.21.10 + chrome-trace-event: ^1.0.2 + enhanced-resolve: ^5.15.0 + es-module-lexer: ^1.2.1 + eslint-scope: 5.1.1 + events: ^3.2.0 + glob-to-regexp: ^0.4.1 + graceful-fs: ^4.2.9 + json-parse-even-better-errors: ^2.3.1 + loader-runner: ^4.2.0 + mime-types: ^2.1.27 + neo-async: ^2.6.2 + schema-utils: ^3.2.0 + tapable: ^2.1.1 + terser-webpack-plugin: ^5.3.10 + watchpack: ^2.4.0 + webpack-sources: ^3.2.3 + peerDependenciesMeta: + webpack-cli: + optional: true + bin: + webpack: bin/webpack.js + checksum: a7be844d5720a0c6282fec012e6fa34b1137dff953c5d48bf2ef066a6c27c1dbc92a9b9effc05ee61c9fe269499266db9782073f2d82a589d3c5c966ffc56584 + languageName: node + linkType: hard + "webpackbar@npm:^5.0.2": version: 5.0.2 resolution: "webpackbar@npm:5.0.2" @@ -22011,6 +22199,21 @@ __metadata: languageName: node linkType: hard +"ws@npm:^8.16.0": + version: 8.16.0 + resolution: "ws@npm:8.16.0" + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: ">=5.0.2" + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + checksum: feb3eecd2bae82fa8a8beef800290ce437d8b8063bdc69712725f21aef77c49cb2ff45c6e5e7fce622248f9c7abaee506bae0a9064067ffd6935460c7357321b + languageName: 
node + linkType: hard + "xdg-basedir@npm:^4.0.0": version: 4.0.0 resolution: "xdg-basedir@npm:4.0.0"